-rw-r--r-- .mailmap | 6
-rw-r--r-- CREDITS | 1
-rw-r--r-- Documentation/.gitignore | 1
-rw-r--r-- Documentation/ABI/testing/configfs-usb-gadget-uvc | 58
-rw-r--r-- Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935 | 2
-rw-r--r-- Documentation/DocBook/Makefile | 9
-rw-r--r-- Documentation/DocBook/device-drivers.tmpl | 36
-rw-r--r-- Documentation/DocBook/gpu.tmpl | 3540
-rw-r--r-- Documentation/Makefile.sphinx | 63
-rw-r--r-- Documentation/arm64/silicon-errata.txt | 1
-rw-r--r-- Documentation/conf.py | 414
-rw-r--r-- Documentation/devicetree/bindings/display/arm,malidp.txt | 65
-rw-r--r-- Documentation/devicetree/bindings/display/bridge/sii902x.txt | 35
-rw-r--r-- Documentation/devicetree/bindings/display/connector/hdmi-connector.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/display/imx/ldb.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/display/mediatek/mediatek,hdmi.txt | 148
-rw-r--r-- Documentation/devicetree/bindings/display/panel/panel-dpi.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/hwmon/ina2xx.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/i2c/i2c-arb-gpio-challenge.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.txt | 3
-rw-r--r-- Documentation/devicetree/bindings/i2c/i2c-mux-gpio.txt | 6
-rw-r--r-- Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/i2c/i2c-mux-reg.txt | 6
-rw-r--r-- Documentation/devicetree/bindings/net/marvell-bt-sd8xxx.txt | 8
-rw-r--r-- Documentation/devicetree/bindings/vendor-prefixes.txt | 2
-rw-r--r-- Documentation/dmaengine/provider.txt | 2
-rw-r--r-- Documentation/filesystems/devpts.txt | 145
-rw-r--r-- Documentation/gpu/drm-internals.rst | 378
-rw-r--r-- Documentation/gpu/drm-kms-helpers.rst | 260
-rw-r--r-- Documentation/gpu/drm-kms.rst | 656
-rw-r--r-- Documentation/gpu/drm-mm.rst | 454
-rw-r--r-- Documentation/gpu/drm-uapi.rst | 111
-rw-r--r-- Documentation/gpu/i915.rst | 347
-rw-r--r-- Documentation/gpu/index.rst | 14
-rw-r--r-- Documentation/gpu/introduction.rst | 51
-rw-r--r-- Documentation/gpu/kms-properties.csv | 128
-rw-r--r-- Documentation/gpu/vga-switcheroo.rst | 102
-rw-r--r-- Documentation/index.rst | 24
-rw-r--r-- Documentation/kdump/gdbmacros.txt | 93
-rw-r--r-- Documentation/kernel-parameters.txt | 5
-rw-r--r-- Documentation/leds/leds-class.txt | 4
-rw-r--r-- Documentation/mic/mpssd/mpssd.c | 4
-rw-r--r-- Documentation/networking/dsa/dsa.txt | 17
-rw-r--r-- Documentation/networking/ip-sysctl.txt | 8
-rw-r--r-- Documentation/scsi/scsi_eh.txt | 8
-rw-r--r-- Documentation/security/keys.txt | 5
-rw-r--r-- Documentation/security/self-protection.txt | 28
-rw-r--r-- Documentation/sphinx/convert_template.sed | 18
-rw-r--r-- Documentation/sphinx/kernel-doc.py | 127
-rw-r--r-- Documentation/sphinx/post_convert.sed | 23
-rwxr-xr-x Documentation/sphinx/tmplcvt | 19
-rw-r--r-- Documentation/sync_file.txt | 6
-rw-r--r-- Documentation/zh_CN/CodingStyle | 581
-rw-r--r-- MAINTAINERS | 107
-rw-r--r-- Makefile | 7
-rw-r--r-- arch/Kconfig | 7
-rw-r--r-- arch/alpha/include/asm/pgalloc.h | 4
-rw-r--r-- arch/arc/Kconfig | 31
-rw-r--r-- arch/arc/Makefile | 2
-rw-r--r-- arch/arc/boot/dts/abilis_tb100.dtsi | 2
-rw-r--r-- arch/arc/boot/dts/abilis_tb101.dtsi | 2
-rw-r--r-- arch/arc/boot/dts/axc001.dtsi | 1
-rw-r--r-- arch/arc/boot/dts/axc003.dtsi | 1
-rw-r--r-- arch/arc/boot/dts/axc003_idu.dtsi | 1
-rw-r--r-- arch/arc/boot/dts/eznps.dts | 1
-rw-r--r-- arch/arc/boot/dts/nsim_700.dts | 1
-rw-r--r-- arch/arc/boot/dts/nsimosci.dts | 15
-rw-r--r-- arch/arc/boot/dts/nsimosci_hs.dts | 15
-rw-r--r-- arch/arc/boot/dts/nsimosci_hs_idu.dts | 15
-rw-r--r-- arch/arc/boot/dts/skeleton.dtsi | 1
-rw-r--r-- arch/arc/boot/dts/skeleton_hs.dtsi | 1
-rw-r--r-- arch/arc/boot/dts/skeleton_hs_idu.dtsi | 1
-rw-r--r-- arch/arc/boot/dts/vdk_axc003.dtsi | 1
-rw-r--r-- arch/arc/boot/dts/vdk_axc003_idu.dtsi | 1
-rw-r--r-- arch/arc/boot/dts/vdk_axs10x_mb.dtsi | 13
-rw-r--r-- arch/arc/boot/dts/vdk_hs38_smp.dts | 2
-rw-r--r-- arch/arc/configs/nsimosci_defconfig | 3
-rw-r--r-- arch/arc/configs/nsimosci_hs_defconfig | 3
-rw-r--r-- arch/arc/configs/nsimosci_hs_smp_defconfig | 3
-rw-r--r-- arch/arc/configs/vdk_hs38_smp_defconfig | 7
-rw-r--r-- arch/arc/include/asm/atomic.h | 45
-rw-r--r-- arch/arc/include/asm/entry-compact.h | 4
-rw-r--r-- arch/arc/include/asm/mmu_context.h | 2
-rw-r--r-- arch/arc/include/asm/pgalloc.h | 4
-rw-r--r-- arch/arc/include/asm/pgtable.h | 2
-rw-r--r-- arch/arc/include/asm/processor.h | 2
-rw-r--r-- arch/arc/include/asm/smp.h | 2
-rw-r--r-- arch/arc/include/asm/spinlock.h | 292
-rw-r--r-- arch/arc/include/asm/thread_info.h | 2
-rw-r--r-- arch/arc/include/asm/uaccess.h | 2
-rw-r--r-- arch/arc/include/uapi/asm/swab.h | 2
-rw-r--r-- arch/arc/kernel/entry-compact.S | 18
-rw-r--r-- arch/arc/kernel/intc-compact.c | 6
-rw-r--r-- arch/arc/kernel/perf_event.c | 2
-rw-r--r-- arch/arc/kernel/setup.c | 2
-rw-r--r-- arch/arc/kernel/signal.c | 2
-rw-r--r-- arch/arc/kernel/troubleshoot.c | 2
-rw-r--r-- arch/arc/mm/cache.c | 6
-rw-r--r-- arch/arc/mm/dma.c | 2
-rw-r--r-- arch/arm/boot/dts/Makefile | 1
-rw-r--r-- arch/arm/boot/dts/am437x-sk-evm.dts | 2
-rw-r--r-- arch/arm/boot/dts/am57xx-idk-common.dtsi | 32
-rw-r--r-- arch/arm/boot/dts/dm8148-evm.dts | 8
-rw-r--r-- arch/arm/boot/dts/dm8148-t410.dts | 9
-rw-r--r-- arch/arm/boot/dts/dra7.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/dra74x.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/exynos5250-snow-common.dtsi | 13
-rw-r--r-- arch/arm/boot/dts/exynos5420-peach-pit.dts | 13
-rw-r--r-- arch/arm/boot/dts/omap3-evm-37xx.dts | 2
-rw-r--r-- arch/arm/boot/dts/omap3-igep.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/omap3-igep0020-common.dtsi | 11
-rw-r--r-- arch/arm/boot/dts/omap3-n900.dts | 4
-rw-r--r-- arch/arm/boot/dts/omap3-n950-n9.dtsi | 6
-rw-r--r-- arch/arm/boot/dts/omap3-zoom3.dts | 6
-rw-r--r-- arch/arm/boot/dts/omap5-board-common.dtsi | 48
-rw-r--r-- arch/arm/boot/dts/omap5-igep0050.dts | 26
-rw-r--r-- arch/arm/boot/dts/omap5-uevm.dts | 10
-rw-r--r-- arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts | 1
-rw-r--r-- arch/arm/boot/dts/stih407-family.dtsi | 3
-rw-r--r-- arch/arm/boot/dts/sun6i-a31s-primo81.dts | 2
-rw-r--r-- arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts | 2
-rw-r--r-- arch/arm/configs/exynos_defconfig | 1
-rw-r--r-- arch/arm/configs/multi_v7_defconfig | 1
-rw-r--r-- arch/arm/include/asm/pgalloc.h | 2
-rw-r--r-- arch/arm/include/asm/pgtable-2level.h | 1
-rw-r--r-- arch/arm/include/asm/pgtable-3level.h | 5
-rw-r--r-- arch/arm/include/asm/pgtable.h | 1
-rw-r--r-- arch/arm/kernel/ptrace.c | 2
-rw-r--r-- arch/arm/kernel/smp.c | 2
-rw-r--r-- arch/arm/mach-exynos/Kconfig | 1
-rw-r--r-- arch/arm/mach-imx/mach-imx6ul.c | 2
-rw-r--r-- arch/arm/mach-omap1/ams-delta-fiq-handler.S | 6
-rw-r--r-- arch/arm/mach-omap1/ams-delta-fiq.c | 5
-rw-r--r-- arch/arm/mach-omap1/include/mach/ams-delta-fiq.h | 2
-rw-r--r-- arch/arm/mach-omap2/Kconfig | 12
-rw-r--r-- arch/arm/mach-omap2/board-ldp.c | 3
-rw-r--r-- arch/arm/mach-omap2/board-rx51-video.c | 4
-rw-r--r-- arch/arm/mach-omap2/display.c | 2
-rw-r--r-- arch/arm/mach-omap2/display.h | 5
-rw-r--r-- arch/arm/mach-omap2/dss-common.c | 2
-rw-r--r-- arch/arm/mach-omap2/omap-secure.h | 1
-rw-r--r-- arch/arm/mach-omap2/omap-smp.c | 48
-rw-r--r-- arch/arm/mach-omap2/powerdomain.c | 9
-rw-r--r-- arch/arm/mach-omap2/powerdomains7xx_data.c | 76
-rw-r--r-- arch/arm/mach-omap2/timer.c | 7
-rw-r--r-- arch/arm/mach-vexpress/spc.c | 2
-rw-r--r-- arch/arm/plat-samsung/devs.c | 2
-rw-r--r-- arch/arm64/Kconfig | 21
-rw-r--r-- arch/arm64/Kconfig.debug | 25
-rw-r--r-- arch/arm64/Makefile | 6
-rw-r--r-- arch/arm64/boot/dts/lg/lg1312.dtsi | 2
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3399.dtsi | 2
-rw-r--r-- arch/arm64/include/asm/elf.h | 4
-rw-r--r-- arch/arm64/include/asm/kgdb.h | 45
-rw-r--r-- arch/arm64/include/asm/memory.h | 3
-rw-r--r-- arch/arm64/include/asm/page.h | 12
-rw-r--r-- arch/arm64/include/asm/pgalloc.h | 2
-rw-r--r-- arch/arm64/include/asm/smp.h | 12
-rw-r--r-- arch/arm64/include/asm/spinlock.h | 42
-rw-r--r-- arch/arm64/include/asm/uaccess.h | 13
-rw-r--r-- arch/arm64/include/asm/unistd.h | 2
-rw-r--r-- arch/arm64/include/asm/unistd32.h | 8
-rw-r--r-- arch/arm64/kernel/cpuinfo.c | 8
-rw-r--r-- arch/arm64/kernel/hibernate.c | 6
-rw-r--r-- arch/arm64/kernel/kgdb.c | 14
-rw-r--r-- arch/arm64/kernel/smp.c | 18
-rw-r--r-- arch/arm64/kernel/traps.c | 31
-rw-r--r-- arch/arm64/kvm/hyp/vgic-v3-sr.c | 36
-rw-r--r-- arch/arm64/kvm/sys_regs.c | 13
-rw-r--r-- arch/arm64/mm/context.c | 9
-rw-r--r-- arch/arm64/mm/dump.c | 8
-rw-r--r-- arch/arm64/mm/fault.c | 4
-rw-r--r-- arch/arm64/mm/flush.c | 4
-rw-r--r-- arch/arm64/mm/hugetlbpage.c | 14
-rw-r--r-- arch/avr32/include/asm/pgalloc.h | 6
-rw-r--r-- arch/cris/include/asm/pgalloc.h | 4
-rw-r--r-- arch/frv/mm/pgalloc.c | 6
-rw-r--r-- arch/hexagon/include/asm/pgalloc.h | 4
-rw-r--r-- arch/ia64/Kconfig | 2
-rw-r--r-- arch/ia64/include/asm/thread_info.h | 8
-rw-r--r-- arch/ia64/kernel/init_task.c | 1
-rw-r--r-- arch/m68k/include/asm/mcf_pgalloc.h | 4
-rw-r--r-- arch/m68k/include/asm/motorola_pgalloc.h | 4
-rw-r--r-- arch/m68k/include/asm/sun3_pgalloc.h | 4
-rw-r--r-- arch/metag/include/asm/pgalloc.h | 5
-rw-r--r-- arch/microblaze/include/asm/pgalloc.h | 4
-rw-r--r-- arch/microblaze/mm/pgtable.c | 3
-rw-r--r-- arch/mips/include/asm/kvm_host.h | 3
-rw-r--r-- arch/mips/include/asm/pgalloc.h | 6
-rw-r--r-- arch/mips/kvm/emulate.c | 19
-rw-r--r-- arch/mips/kvm/interrupt.h | 1
-rw-r--r-- arch/mips/kvm/locore.S | 1
-rw-r--r-- arch/mips/kvm/mips.c | 11
-rw-r--r-- arch/mn10300/include/asm/thread_info.h | 2
-rw-r--r-- arch/mn10300/kernel/kgdb.c | 3
-rw-r--r-- arch/mn10300/mm/pgtable.c | 6
-rw-r--r-- arch/nios2/include/asm/pgalloc.h | 5
-rw-r--r-- arch/openrisc/include/asm/pgalloc.h | 2
-rw-r--r-- arch/openrisc/mm/ioremap.c | 2
-rw-r--r-- arch/parisc/include/asm/pgalloc.h | 7
-rw-r--r-- arch/parisc/include/asm/traps.h | 2
-rw-r--r-- arch/parisc/kernel/processor.c | 5
-rw-r--r-- arch/parisc/kernel/time.c | 5
-rw-r--r-- arch/parisc/kernel/unaligned.c | 13
-rw-r--r-- arch/parisc/kernel/unwind.c | 22
-rw-r--r-- arch/powerpc/Kconfig | 2
-rw-r--r-- arch/powerpc/include/asm/book3s/32/pgalloc.h | 1
-rw-r--r-- arch/powerpc/include/asm/book3s/64/mmu-hash.h | 1
-rw-r--r-- arch/powerpc/include/asm/book3s/64/pgalloc.h | 28
-rw-r--r-- arch/powerpc/include/asm/book3s/64/radix.h | 15
-rw-r--r-- arch/powerpc/include/asm/book3s/64/tlbflush-radix.h | 3
-rw-r--r-- arch/powerpc/include/asm/book3s/64/tlbflush.h | 14
-rw-r--r-- arch/powerpc/include/asm/book3s/pgalloc.h | 5
-rw-r--r-- arch/powerpc/include/asm/nohash/64/pgalloc.h | 10
-rw-r--r-- arch/powerpc/include/asm/reg.h | 6
-rw-r--r-- arch/powerpc/kernel/eeh_driver.c | 7
-rw-r--r-- arch/powerpc/kernel/exceptions-64s.S | 7
-rw-r--r-- arch/powerpc/kernel/prom_init.c | 3
-rw-r--r-- arch/powerpc/kernel/ptrace.c | 4
-rw-r--r-- arch/powerpc/mm/hash_native_64.c | 14
-rw-r--r-- arch/powerpc/mm/hash_utils_64.c | 32
-rw-r--r-- arch/powerpc/mm/hugetlbpage.c | 2
-rw-r--r-- arch/powerpc/mm/mmu_context_book3s64.c | 2
-rw-r--r-- arch/powerpc/mm/pgtable-book3s64.c | 5
-rw-r--r-- arch/powerpc/mm/pgtable-radix.c | 32
-rw-r--r-- arch/powerpc/mm/pgtable_32.c | 4
-rw-r--r-- arch/powerpc/mm/pgtable_64.c | 3
-rw-r--r-- arch/powerpc/mm/tlb-radix.c | 92
-rw-r--r-- arch/powerpc/platforms/512x/clock-commonclk.c | 2
-rw-r--r-- arch/powerpc/platforms/cell/spufs/coredump.c | 2
-rw-r--r-- arch/powerpc/platforms/pseries/eeh_pseries.c | 49
-rw-r--r-- arch/powerpc/platforms/pseries/iommu.c | 4
-rw-r--r-- arch/s390/configs/default_defconfig | 44
-rw-r--r-- arch/s390/configs/gcov_defconfig | 34
-rw-r--r-- arch/s390/configs/performance_defconfig | 36
-rw-r--r-- arch/s390/configs/zfcpdump_defconfig | 4
-rw-r--r-- arch/s390/defconfig | 44
-rw-r--r-- arch/s390/include/asm/kvm_host.h | 1
-rw-r--r-- arch/s390/kernel/perf_cpum_cf.c | 8
-rw-r--r-- arch/s390/kvm/intercept.c | 2
-rw-r--r-- arch/s390/kvm/kvm-s390.c | 3
-rw-r--r-- arch/s390/mm/fault.c | 1
-rw-r--r-- arch/s390/mm/pgalloc.c | 2
-rw-r--r-- arch/s390/mm/pgtable.c | 2
-rw-r--r-- arch/s390/net/bpf_jit.h | 4
-rw-r--r-- arch/s390/net/bpf_jit_comp.c | 4
-rw-r--r-- arch/score/include/asm/pgalloc.h | 5
-rw-r--r-- arch/sh/include/asm/pgalloc.h | 4
-rw-r--r-- arch/sh/mm/pgtable.c | 2
-rw-r--r-- arch/sparc/include/asm/head_64.h | 4
-rw-r--r-- arch/sparc/include/asm/pgalloc_64.h | 6
-rw-r--r-- arch/sparc/include/asm/ttable.h | 8
-rw-r--r-- arch/sparc/kernel/Makefile | 1
-rw-r--r-- arch/sparc/kernel/rtrap_64.S | 57
-rw-r--r-- arch/sparc/kernel/signal32.c | 46
-rw-r--r-- arch/sparc/kernel/signal_32.c | 41
-rw-r--r-- arch/sparc/kernel/signal_64.c | 31
-rw-r--r-- arch/sparc/kernel/sigutil_32.c | 9
-rw-r--r-- arch/sparc/kernel/sigutil_64.c | 10
-rw-r--r-- arch/sparc/kernel/urtt_fill.S | 98
-rw-r--r-- arch/sparc/mm/init_64.c | 16
-rw-r--r-- arch/tile/include/asm/thread_info.h | 2
-rw-r--r-- arch/tile/kernel/process.c | 3
-rw-r--r-- arch/tile/mm/pgtable.c | 2
-rw-r--r-- arch/um/kernel/mem.c | 4
-rw-r--r-- arch/unicore32/include/asm/pgalloc.h | 2
-rw-r--r-- arch/x86/Kconfig | 9
-rw-r--r-- arch/x86/boot/Makefile | 3
-rw-r--r-- arch/x86/events/intel/rapl.c | 2
-rw-r--r-- arch/x86/events/intel/uncore_snbep.c | 21
-rw-r--r-- arch/x86/include/asm/intel-family.h | 68
-rw-r--r-- arch/x86/include/asm/kprobes.h | 11
-rw-r--r-- arch/x86/include/asm/kvm_host.h | 11
-rw-r--r-- arch/x86/include/asm/msr.h | 4
-rw-r--r-- arch/x86/include/asm/pgalloc.h | 4
-rw-r--r-- arch/x86/include/asm/stacktrace.h | 6
-rw-r--r-- arch/x86/kernel/apic/io_apic.c | 2
-rw-r--r-- arch/x86/kernel/cpu/amd.c | 4
-rw-r--r-- arch/x86/kernel/dumpstack.c | 22
-rw-r--r-- arch/x86/kernel/dumpstack_32.c | 4
-rw-r--r-- arch/x86/kernel/dumpstack_64.c | 8
-rw-r--r-- arch/x86/kernel/early-quirks.c | 404
-rw-r--r-- arch/x86/kernel/espfix_64.c | 2
-rw-r--r-- arch/x86/kernel/irq_32.c | 2
-rw-r--r-- arch/x86/kernel/kprobes/core.c | 12
-rw-r--r-- arch/x86/kernel/traps.c | 20
-rw-r--r-- arch/x86/kvm/cpuid.c | 22
-rw-r--r-- arch/x86/kvm/mmu.c | 8
-rw-r--r-- arch/x86/kvm/svm.c | 21
-rw-r--r-- arch/x86/kvm/vmx.c | 15
-rw-r--r-- arch/x86/kvm/x86.c | 12
-rw-r--r-- arch/x86/mm/pgtable.c | 2
-rw-r--r-- arch/x86/platform/efi/efi_64.c | 2
-rw-r--r-- arch/x86/xen/mmu.c | 74
-rw-r--r-- arch/x86/xen/p2m.c | 2
-rw-r--r-- arch/xtensa/include/asm/pgalloc.h | 2
-rw-r--r-- block/blk-lib.c | 12
-rw-r--r-- block/blk-mq.c | 17
-rw-r--r-- crypto/asymmetric_keys/Kconfig | 1
-rw-r--r-- drivers/acpi/acpi_processor.c | 9
-rw-r--r-- drivers/acpi/acpi_video.c | 9
-rw-r--r-- drivers/acpi/acpica/exconfig.c | 2
-rw-r--r-- drivers/acpi/acpica/hwregs.c | 169
-rw-r--r-- drivers/acpi/acpica/nsparse.c | 9
-rw-r--r-- drivers/acpi/bus.c | 2
-rw-r--r-- drivers/acpi/ec.c | 29
-rw-r--r-- drivers/acpi/internal.h | 2
-rw-r--r-- drivers/acpi/processor_throttling.c | 9
-rw-r--r-- drivers/ata/libata-eh.c | 2
-rw-r--r-- drivers/atm/firestream.c | 6
-rw-r--r-- drivers/atm/iphase.c | 2
-rw-r--r-- drivers/base/Makefile | 2
-rw-r--r-- drivers/base/isa.c | 2
-rw-r--r-- drivers/base/module.c | 8
-rw-r--r-- drivers/base/power/opp/cpu.c | 12
-rw-r--r-- drivers/base/power/opp/of.c | 10
-rw-r--r-- drivers/base/power/opp/opp.h | 8
-rw-r--r-- drivers/block/aoe/aoecmd.c | 2
-rw-r--r-- drivers/block/nbd.c | 2
-rw-r--r-- drivers/block/xen-blkfront.c | 35
-rw-r--r-- drivers/char/agp/intel-gtt.c | 8
-rw-r--r-- drivers/char/ipmi/ipmi_msghandler.c | 8
-rw-r--r-- drivers/clk/Kconfig | 1
-rw-r--r-- drivers/clk/microchip/clk-pic32mzda.c | 10
-rw-r--r-- drivers/cpufreq/cpufreq.c | 2
-rw-r--r-- drivers/cpufreq/intel_pstate.c | 36
-rw-r--r-- drivers/cpufreq/pcc-cpufreq.c | 2
-rw-r--r-- drivers/crypto/ccp/ccp-crypto-aes-xts.c | 17
-rw-r--r-- drivers/crypto/omap-sham.c | 2
-rw-r--r-- drivers/devfreq/devfreq.c | 27
-rw-r--r-- drivers/devfreq/event/exynos-nocp.c | 3
-rw-r--r-- drivers/dma-buf/Makefile | 2
-rw-r--r-- drivers/dma-buf/dma-buf.c | 51
-rw-r--r-- drivers/dma-buf/fence-array.c | 144
-rw-r--r-- drivers/dma-buf/fence.c | 8
-rw-r--r-- drivers/dma-buf/reservation.c | 72
-rw-r--r-- drivers/dma-buf/sync_file.c | 2
-rw-r--r-- drivers/dma/at_xdmac.c | 82
-rw-r--r-- drivers/dma/mv_xor.c | 10
-rw-r--r-- drivers/edac/edac_mc.c | 3
-rw-r--r-- drivers/edac/sb_edac.c | 35
-rw-r--r-- drivers/extcon/extcon-palmas.c | 2
-rw-r--r-- drivers/firmware/efi/arm-init.c | 14
-rw-r--r-- drivers/gpio/Kconfig | 9
-rw-r--r-- drivers/gpio/gpio-104-dio-48e.c | 4
-rw-r--r-- drivers/gpio/gpio-104-idi-48.c | 1
-rw-r--r-- drivers/gpio/gpio-bcm-kona.c | 4
-rw-r--r-- drivers/gpio/gpio-lpc32xx.c | 48
-rw-r--r-- drivers/gpio/gpio-zynq.c | 7
-rw-r--r-- drivers/gpio/gpiolib-of.c | 1
-rw-r--r-- drivers/gpio/gpiolib.c | 78
-rw-r--r-- drivers/gpu/drm/Makefile | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 25
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 28
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | 24
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 19
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cik.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 22
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 16
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 16
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 16
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/fiji_dpm.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 19
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 31
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/iceland_dpm.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 20
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 27
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/tonga_dpm.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vi.c | 15
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 4
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 3
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 70
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 2
-rw-r--r-- drivers/gpu/drm/amd/include/amd_shared.h | 1
-rw-r--r-- drivers/gpu/drm/amd/include/atombios.h | 72
-rw-r--r-- drivers/gpu/drm/amd/include/cgs_common.h | 6
-rw-r--r-- drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 8
-rw-r--r-- drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c | 3
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c | 4
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | 6
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 9
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h | 1
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c | 256
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h | 3
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c | 2
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c | 6
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c | 18
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c | 43
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h | 32
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c | 22
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h | 16
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c | 145
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h | 1
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h | 1
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/smu74.h | 75
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h | 42
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | 6
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 51
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 1
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c | 7
-rw-r--r-- drivers/gpu/drm/arc/Makefile | 2
-rw-r--r-- drivers/gpu/drm/arc/arcpgu.h | 2
-rw-r--r-- drivers/gpu/drm/arc/arcpgu_crtc.c | 16
-rw-r--r-- drivers/gpu/drm/arc/arcpgu_drv.c | 52
-rw-r--r-- drivers/gpu/drm/arc/arcpgu_hdmi.c | 18
-rw-r--r-- drivers/gpu/drm/arc/arcpgu_sim.c | 128
-rw-r--r-- drivers/gpu/drm/arm/Kconfig | 16
-rw-r--r-- drivers/gpu/drm/arm/Makefile | 2
-rw-r--r-- drivers/gpu/drm/arm/hdlcd_crtc.c | 105
-rw-r--r-- drivers/gpu/drm/arm/hdlcd_drv.c | 78
-rw-r--r-- drivers/gpu/drm/arm/hdlcd_drv.h | 5
-rw-r--r-- drivers/gpu/drm/arm/malidp_crtc.c | 216
-rw-r--r-- drivers/gpu/drm/arm/malidp_drv.c | 512
-rw-r--r-- drivers/gpu/drm/arm/malidp_drv.h | 54
-rw-r--r-- drivers/gpu/drm/arm/malidp_hw.c | 691
-rw-r--r-- drivers/gpu/drm/arm/malidp_hw.h | 241
-rw-r--r-- drivers/gpu/drm/arm/malidp_planes.c | 298
-rw-r--r-- drivers/gpu/drm/arm/malidp_regs.h | 172
-rw-r--r-- drivers/gpu/drm/armada/armada_crtc.c | 14
-rw-r--r-- drivers/gpu/drm/armada/armada_drv.c | 3
-rw-r--r-- drivers/gpu/drm/armada/armada_overlay.c | 1
-rw-r--r-- drivers/gpu/drm/ast/ast_drv.c | 2
-rw-r--r-- drivers/gpu/drm/ast/ast_fb.c | 3
-rw-r--r-- drivers/gpu/drm/ast/ast_mode.c | 10
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | 14
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c | 19
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c | 22
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c | 2
-rw-r--r-- drivers/gpu/drm/bochs/bochs_drv.c | 2
-rw-r--r-- drivers/gpu/drm/bridge/Kconfig | 8
-rw-r--r-- drivers/gpu/drm/bridge/Makefile | 1
-rw-r--r-- drivers/gpu/drm/bridge/analogix-anx78xx.c | 8
-rw-r--r-- drivers/gpu/drm/bridge/dw-hdmi.c | 11
-rw-r--r-- drivers/gpu/drm/bridge/nxp-ptn3460.c | 8
-rw-r--r-- drivers/gpu/drm/bridge/parade-ps8622.c | 10
-rw-r--r-- drivers/gpu/drm/bridge/sii902x.c | 467
-rw-r--r-- drivers/gpu/drm/cirrus/cirrus_drv.c | 2
-rw-r--r-- drivers/gpu/drm/cirrus/cirrus_mode.c | 11
-rw-r--r-- drivers/gpu/drm/drm_atomic.c | 132
-rw-r--r-- drivers/gpu/drm/drm_atomic_helper.c | 566
-rw-r--r-- drivers/gpu/drm/drm_auth.c | 285
-rw-r--r-- drivers/gpu/drm/drm_bridge.c | 2
-rw-r--r-- drivers/gpu/drm/drm_bufs.c | 8
-rw-r--r-- drivers/gpu/drm/drm_crtc.c | 623
-rw-r--r-- drivers/gpu/drm/drm_crtc_helper.c | 90
-rw-r--r-- drivers/gpu/drm/drm_crtc_internal.h | 88
-rw-r--r-- drivers/gpu/drm/drm_debugfs.c | 3
-rw-r--r-- drivers/gpu/drm/drm_dp_helper.c | 56
-rw-r--r-- drivers/gpu/drm/drm_dp_mst_topology.c | 8
-rw-r--r-- drivers/gpu/drm/drm_drv.c | 241
-rw-r--r-- drivers/gpu/drm/drm_edid_load.c | 2
-rw-r--r-- drivers/gpu/drm/drm_fb_cma_helper.c | 45
-rw-r--r-- drivers/gpu/drm/drm_fb_helper.c | 56
-rw-r--r-- drivers/gpu/drm/drm_fops.c | 149
-rw-r--r-- drivers/gpu/drm/drm_fourcc.c | 320
-rw-r--r-- drivers/gpu/drm/drm_gem.c | 2
-rw-r--r-- drivers/gpu/drm/drm_gem_cma_helper.c | 12
-rw-r--r-- drivers/gpu/drm/drm_info.c | 117
-rw-r--r-- drivers/gpu/drm/drm_internal.h | 21
-rw-r--r-- drivers/gpu/drm/drm_ioctl.c | 124
-rw-r--r-- drivers/gpu/drm/drm_irq.c | 215
-rw-r--r-- drivers/gpu/drm/drm_legacy.h | 8
-rw-r--r-- drivers/gpu/drm/drm_lock.c | 240
-rw-r--r-- drivers/gpu/drm/drm_mipi_dsi.c | 38
-rw-r--r-- drivers/gpu/drm/drm_mm.c | 4
-rw-r--r-- drivers/gpu/drm/drm_modes.c | 6
-rw-r--r-- drivers/gpu/drm/drm_modeset_lock.c | 13
-rw-r--r-- drivers/gpu/drm/drm_pci.c | 51
-rw-r--r-- drivers/gpu/drm/drm_plane_helper.c | 38
-rw-r--r-- drivers/gpu/drm/drm_platform.c | 18
-rw-r--r-- drivers/gpu/drm/drm_prime.c | 10
-rw-r--r-- drivers/gpu/drm/drm_probe_helper.c | 21
-rw-r--r-- drivers/gpu/drm/drm_simple_kms_helper.c | 206
-rw-r--r-- drivers/gpu/drm/drm_sysfs.c | 71
-rw-r--r-- drivers/gpu/drm/drm_vm.c | 54
-rw-r--r-- drivers/gpu/drm/drm_vma_manager.c | 3
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_drv.c | 1
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_gpu.h | 2
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_iommu.c | 1
-rw-r--r-- drivers/gpu/drm/exynos/exynos7_drm_decon.c | 1
-rw-r--r-- drivers/gpu/drm/exynos/exynos_dp.c | 5
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_core.c | 1
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_dpi.c | 9
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_drv.c | 11
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_dsi.c | 9
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fimd.c | 5
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_g2d.c | 12
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_plane.c | 2
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_vidi.c | 8
-rw-r--r-- drivers/gpu/drm/exynos/exynos_hdmi.c | 8
-rw-r--r-- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c | 44
-rw-r--r-- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | 48
-rw-r--r-- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h | 2
-rw-r--r-- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c | 13
-rw-r--r-- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c | 16
-rw-r--r-- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h | 1
-rw-r--r-- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c | 9
-rw-r--r-- drivers/gpu/drm/gma500/gma_display.c | 11
-rw-r--r-- drivers/gpu/drm/gma500/gma_display.h | 4
-rw-r--r-- drivers/gpu/drm/gma500/psb_intel_display.c | 7
-rw-r--r-- drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c | 20
-rw-r--r-- drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c | 11
-rw-r--r-- drivers/gpu/drm/i915/Kconfig | 22
-rw-r--r-- drivers/gpu/drm/i915/Makefile | 8
-rw-r--r-- drivers/gpu/drm/i915/gvt/Makefile | 5
-rw-r--r-- drivers/gpu/drm/i915/gvt/debug.h | 34
-rw-r--r-- drivers/gpu/drm/i915/gvt/gvt.c | 145
-rw-r--r-- drivers/gpu/drm/i915/gvt/gvt.h | 69
-rw-r--r-- drivers/gpu/drm/i915/gvt/hypercall.h | 38
-rw-r--r-- drivers/gpu/drm/i915/gvt/mpt.h | 49
-rw-r--r-- drivers/gpu/drm/i915/i915_cmd_parser.c | 53
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs.c | 179
-rw-r--r-- drivers/gpu/drm/i915/i915_dma.c | 171
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 136
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 457
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 777
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_batch_pool.c | 6
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_context.c | 500
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_dmabuf.h | 45
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_evict.c | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 30
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_fence.c | 14
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.c | 310
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.h | 44
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_render_state.c | 45
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_shrinker.c | 48
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_stolen.c | 10
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_tiling.c | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_userptr.c | 12
-rw-r--r-- drivers/gpu/drm/i915/i915_gpu_error.c | 81
-rw-r--r-- drivers/gpu/drm/i915/i915_guc_reg.h | 6
-rw-r--r-- drivers/gpu/drm/i915/i915_guc_submission.c | 521
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 522
-rw-r--r-- drivers/gpu/drm/i915/i915_params.c | 23
-rw-r--r-- drivers/gpu/drm/i915/i915_params.h | 5
-rw-r--r-- drivers/gpu/drm/i915/i915_pvinfo.h | 113
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 76
-rw-r--r-- drivers/gpu/drm/i915/i915_sysfs.c | 11
-rw-r--r-- drivers/gpu/drm/i915/i915_trace.h | 48
-rw-r--r-- drivers/gpu/drm/i915/i915_vgpu.c | 44
-rw-r--r-- drivers/gpu/drm/i915/i915_vgpu.h | 92
-rw-r--r-- drivers/gpu/drm/i915/intel_atomic.c | 5
-rw-r--r-- drivers/gpu/drm/i915/intel_audio.c | 8
-rw-r--r-- drivers/gpu/drm/i915/intel_bios.c | 67
-rw-r--r-- drivers/gpu/drm/i915/intel_bios.h | 16
-rw-r--r-- drivers/gpu/drm/i915/intel_color.c | 3
-rw-r--r-- drivers/gpu/drm/i915/intel_crt.c | 5
-rw-r--r-- drivers/gpu/drm/i915/intel_csr.c | 30
-rw-r--r-- drivers/gpu/drm/i915/intel_ddi.c | 216
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 1982
-rw-r--r-- drivers/gpu/drm/i915/intel_dp.c | 511
-rw-r--r-- drivers/gpu/drm/i915/intel_dp_aux_backlight.c | 172
-rw-r--r-- drivers/gpu/drm/i915/intel_dp_link_training.c | 26
-rw-r--r-- drivers/gpu/drm/i915/intel_dp_mst.c | 6
-rw-r--r-- drivers/gpu/drm/i915/intel_dpio_phy.c | 470
-rw-r--r-- drivers/gpu/drm/i915/intel_dpll_mgr.c | 40
-rw-r--r-- drivers/gpu/drm/i915/intel_drv.h | 242
-rw-r--r-- drivers/gpu/drm/i915/intel_dsi.c | 63
-rw-r--r-- drivers/gpu/drm/i915/intel_dsi.h | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c | 179
-rw-r--r-- drivers/gpu/drm/i915/intel_dsi_panel_vbt.c | 77
-rw-r--r-- drivers/gpu/drm/i915/intel_dvo.c | 21
-rw-r--r-- drivers/gpu/drm/i915/intel_fbc.c | 49
-rw-r--r-- drivers/gpu/drm/i915/intel_fbdev.c | 41
-rw-r--r-- drivers/gpu/drm/i915/intel_guc.h | 45
-rw-r--r-- drivers/gpu/drm/i915/intel_guc_fwif.h | 3
-rw-r--r-- drivers/gpu/drm/i915/intel_guc_loader.c | 200
-rw-r--r-- drivers/gpu/drm/i915/intel_gvt.c | 100
-rw-r--r-- drivers/gpu/drm/i915/intel_gvt.h | 45
-rw-r--r-- drivers/gpu/drm/i915/intel_hdmi.c | 360
-rw-r--r-- drivers/gpu/drm/i915/intel_hotplug.c | 13
-rw-r--r-- drivers/gpu/drm/i915/intel_lrc.c | 828
-rw-r--r-- drivers/gpu/drm/i915/intel_lrc.h | 24
-rw-r--r-- drivers/gpu/drm/i915/intel_lvds.c | 9
-rw-r--r-- drivers/gpu/drm/i915/intel_mocs.c | 12
-rw-r--r-- drivers/gpu/drm/i915/intel_opregion.c | 109
-rw-r--r-- drivers/gpu/drm/i915/intel_overlay.c | 147
-rw-r--r-- drivers/gpu/drm/i915/intel_panel.c | 21
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.c | 1244
-rw-r--r-- drivers/gpu/drm/i915/intel_psr.c | 53
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.c | 627
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.h | 41
-rw-r--r-- drivers/gpu/drm/i915/intel_runtime_pm.c | 199
-rw-r--r-- drivers/gpu/drm/i915/intel_sdvo.c | 29
-rw-r--r-- drivers/gpu/drm/i915/intel_sprite.c | 57
-rw-r--r-- drivers/gpu/drm/i915/intel_tv.c | 5
-rw-r--r-- drivers/gpu/drm/i915/intel_uncore.c | 221
-rw-r--r-- drivers/gpu/drm/i915/intel_vbt_defs.h | 13
-rw-r--r-- drivers/gpu/drm/imx/imx-drm-core.c | 14
-rw-r--r-- drivers/gpu/drm/imx/imx-drm.h | 7
-rw-r--r-- drivers/gpu/drm/imx/imx-ldb.c | 78
-rw-r--r-- drivers/gpu/drm/imx/imx-tve.c | 6
-rw-r--r-- drivers/gpu/drm/imx/ipuv3-crtc.c | 10
-rw-r--r-- drivers/gpu/drm/imx/ipuv3-plane.c | 5
-rw-r--r-- drivers/gpu/drm/imx/parallel-display.c | 40
-rw-r--r-- drivers/gpu/drm/mediatek/Kconfig | 9
-rw-r--r-- drivers/gpu/drm/mediatek/Makefile | 7
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_cec.c | 265
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_cec.h | 26
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_dpi.c | 5
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_drv.c | 13
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_plane.c | 1
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_dsi.c | 13
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_hdmi.c | 1828
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_hdmi.h | 23
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c | 358
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_hdmi_regs.h | 238
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c | 515
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_drv.c | 2
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_mode.c | 19
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_gpu.c | 2
-rw-r--r-- drivers/gpu/drm/msm/edp/edp_connector.c | 10
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi_connector.c | 8
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | 20
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c | 9
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 10
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 12
-rw-r--r-- drivers/gpu/drm/msm/msm_atomic.c | 39
-rw-r--r-- drivers/gpu/drm/msm/msm_drv.c | 9
-rw-r--r-- drivers/gpu/drm/msm/msm_fb.c | 8
-rw-r--r-- drivers/gpu/drm/msm/msm_fbdev.c | 18
-rw-r--r-- drivers/gpu/drm/msm/msm_gem.c | 2
-rw-r--r-- drivers/gpu/drm/msm/msm_gem_submit.c | 7
-rw-r--r-- drivers/gpu/drm/msm/msm_rd.c | 3
-rw-r--r-- drivers/gpu/drm/msm/msm_ringbuffer.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv04/crtc.c | 12
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvkm/core/device.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h | 5
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_display.c | 22
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_display.h | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drm.c | 13
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fbcon.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fence.h | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_usif.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nv04_fbcon.c | 7
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_display.c | 9
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_fbcon.c | 6
-rw-r--r-- drivers/gpu/drm/nouveau/nvc0_fbcon.c | 6
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c | 13
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c | 12
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h | 9
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c | 5
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c | 53
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c | 15
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | 37
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c | 8
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c | 16
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c | 6
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c | 2
-rw-r--r-- drivers/gpu/drm/omapdrm/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/omapdrm/displays/Kconfig | 28
-rw-r--r-- drivers/gpu/drm/omapdrm/displays/Makefile | 28
-rw-r--r-- drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c | 11
-rw-r--r-- drivers/gpu/drm/omapdrm/displays/connector-dvi.c | 4
-rw-r--r-- drivers/gpu/drm/omapdrm/displays/connector-hdmi.c | 5
-rw-r--r-- drivers/gpu/drm/omapdrm/displays/encoder-opa362.c | 5
-rw-r--r-- drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c | 5
-rw-r--r-- drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c | 3
-rw-r--r-- drivers/gpu/drm/omapdrm/displays/panel-dpi.c | 28
-rw-r--r-- drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c | 6
-rw-r--r-- drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c | 23
-rw-r--r-- drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c | 4
-rw-r--r-- drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c | 6
-rw-r--r-- drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c | 5
-rw-r--r-- drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c | 3
-rw-r--r-- drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c | 4
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/core.c | 5
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/dispc.c | 471
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/dispc.h | 5
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/dispc_coefs.c | 2
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/display.c | 2
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/dpi.c | 136
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/dsi.c | 67
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/dss-of.c | 3
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/dss.c | 256
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/dss.h | 45
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/dss_features.c | 46
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/dss_features.h | 1
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/hdmi.h | 6
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/hdmi4.c | 22
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/hdmi4_core.c | 2
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/hdmi5.c | 22
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/hdmi5_core.c | 6
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/hdmi_common.c | 2
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/hdmi_phy.c | 3
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/hdmi_pll.c | 79
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/hdmi_wp.c | 3
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/omapdss.h | 871
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/output.c | 3
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/pll.c | 129
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/rfbi.c | 2
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/sdi.c | 2
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/venc.c | 3
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/video-pll.c | 9
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_connector.c | 10
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_crtc.c | 56
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_debugfs.c | 2
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | 1
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_drv.c | 16
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_drv.h | 14
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_fb.c | 18
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_fbdev.c | 8
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_gem.c | 16
-rw-r--r-- drivers/gpu/drm/qxl/qxl_display.c | 8
-rw-r--r-- drivers/gpu/drm/qxl/qxl_drv.c | 2
-rw-r--r-- drivers/gpu/drm/qxl/qxl_fb.c | 4
-rw-r--r-- drivers/gpu/drm/qxl/qxl_release.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/atombios_crtc.c | 9
-rw-r--r-- drivers/gpu/drm/radeon/radeon.h | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_device.c | 23
-rw-r--r-- drivers/gpu/drm/radeon/radeon_display.c | 17
-rw-r--r-- drivers/gpu/drm/radeon/radeon_drv.c | 10
-rw-r--r-- drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_pm.c | 13
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_drv.c | 17
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_encoder.c | 12
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_encoder.h | 3
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c | 1
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_kms.c | 10
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c | 1
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_plane.c | 20
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_regs.h | 5
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_vgacon.c | 3
-rw-r--r-- drivers/gpu/drm/rockchip/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/rockchip/analogix_dp-rockchip.c | 15
-rw-r--r-- drivers/gpu/drm/rockchip/dw-mipi-dsi.c | 9
-rw-r--r-- drivers/gpu/drm/rockchip/inno_hdmi.c | 9
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 203
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_drv.h | 12
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_fb.c | 72
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c | 5
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_gem.c | 2
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 69
-rw-r--r-- drivers/gpu/drm/shmobile/shmob_drm_crtc.c | 4
-rw-r--r-- drivers/gpu/drm/shmobile/shmob_drm_drv.c | 3
-rw-r--r-- drivers/gpu/drm/sis/sis_mm.c | 2
-rw-r--r-- drivers/gpu/drm/sti/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/sti/sti_awg_utils.c | 4
-rw-r--r-- drivers/gpu/drm/sti/sti_compositor.c | 20
-rw-r--r-- drivers/gpu/drm/sti/sti_compositor.h | 3
-rw-r--r-- drivers/gpu/drm/sti/sti_crtc.c | 81
-rw-r--r-- drivers/gpu/drm/sti/sti_cursor.c | 39
-rw-r--r-- drivers/gpu/drm/sti/sti_drv.c | 148
-rw-r--r-- drivers/gpu/drm/sti/sti_drv.h | 1
-rw-r--r-- drivers/gpu/drm/sti/sti_dvo.c | 42
-rw-r--r-- drivers/gpu/drm/sti/sti_gdp.c | 46
-rw-r--r-- drivers/gpu/drm/sti/sti_hda.c | 43
-rw-r--r-- drivers/gpu/drm/sti/sti_hdmi.c | 350
-rw-r--r-- drivers/gpu/drm/sti/sti_hdmi.h | 13
-rw-r--r-- drivers/gpu/drm/sti/sti_hqvdp.c | 39
-rw-r--r-- drivers/gpu/drm/sti/sti_mixer.c | 12
-rw-r--r-- drivers/gpu/drm/sti/sti_mixer.h | 2
-rw-r--r-- drivers/gpu/drm/sti/sti_plane.c | 40
-rw-r--r-- drivers/gpu/drm/sti/sti_plane.h | 9
-rw-r--r-- drivers/gpu/drm/sti/sti_tvout.c | 43
-rw-r--r-- drivers/gpu/drm/sti/sti_vid.c | 12
-rw-r--r-- drivers/gpu/drm/sti/sti_vid.h | 2
-rw-r--r-- drivers/gpu/drm/sti/sti_vtg.c | 2
-rw-r--r-- drivers/gpu/drm/sun4i/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_backend.c | 4
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_crtc.c | 12
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_dotclock.c | 39
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_drv.c | 60
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_rgb.c | 24
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_tcon.c | 23
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_tv.c | 9
-rw-r--r-- drivers/gpu/drm/tegra/drm.c | 2
-rw-r--r-- drivers/gpu/drm/tegra/drm.h | 2
-rw-r--r-- drivers/gpu/drm/tegra/dsi.c | 1
-rw-r--r-- drivers/gpu/drm/tegra/hdmi.c | 1
-rw-r--r-- drivers/gpu/drm/tegra/output.c | 8
-rw-r--r-- drivers/gpu/drm/tegra/rgb.c | 1
-rw-r--r-- drivers/gpu/drm/tegra/sor.c | 1
-rw-r--r-- drivers/gpu/drm/tilcdc/tilcdc_drv.c | 3
-rw-r--r-- drivers/gpu/drm/udl/udl_modeset.c | 2
-rw-r--r-- drivers/gpu/drm/vc4/vc4_bo.c | 2
-rw-r--r-- drivers/gpu/drm/vc4/vc4_crtc.c | 53
-rw-r--r-- drivers/gpu/drm/vc4/vc4_dpi.c | 9
-rw-r--r-- drivers/gpu/drm/vc4/vc4_drv.c | 29
-rw-r--r-- drivers/gpu/drm/vc4/vc4_drv.h | 2
-rw-r--r-- drivers/gpu/drm/vc4/vc4_gem.c | 11
-rw-r--r-- drivers/gpu/drm/vc4/vc4_hdmi.c | 9
-rw-r--r-- drivers/gpu/drm/vc4/vc4_kms.c | 28
-rw-r--r-- drivers/gpu/drm/vc4/vc4_plane.c | 5
-rw-r--r-- drivers/gpu/drm/vc4/vc4_regs.h | 4
-rw-r--r-- drivers/gpu/drm/vgem/vgem_drv.c | 4
-rw-r--r-- drivers/gpu/drm/via/via_mm.c | 2
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_display.c | 181
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_drm_bus.c | 10
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_drv.c | 3
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_drv.h | 3
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_plane.c | 150
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c | 7
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 5
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 1
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 8
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 4
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_msg.c | 4
-rw-r--r-- drivers/gpu/vga/vga_switcheroo.c | 34
-rw-r--r-- drivers/hid/hid-elo.c | 2
-rw-r--r-- drivers/hid/hid-multitouch.c | 5
-rw-r--r-- drivers/hid/usbhid/hiddev.c | 10
-rw-r--r-- drivers/hwmon/dell-smm-hwmon.c | 85
-rw-r--r-- drivers/hwmon/fam15h_power.c | 8
-rw-r--r-- drivers/hwmon/lm90.c | 2
-rw-r--r-- drivers/hwtracing/coresight/coresight-tmc-etr.c | 11
-rw-r--r-- drivers/hwtracing/coresight/coresight.c | 15
-rw-r--r-- drivers/i2c/busses/i2c-i801.c | 99
-rw-r--r-- drivers/i2c/busses/i2c-octeon.c | 17
-rw-r--r-- drivers/i2c/muxes/i2c-mux-reg.c | 1
-rw-r--r-- drivers/iio/accel/st_accel_buffer.c | 2
-rw-r--r-- drivers/iio/accel/st_accel_core.c | 1
-rw-r--r-- drivers/iio/common/st_sensors/st_sensors_buffer.c | 25
-rw-r--r-- drivers/iio/common/st_sensors/st_sensors_core.c | 8
-rw-r--r-- drivers/iio/common/st_sensors/st_sensors_trigger.c | 96
-rw-r--r-- drivers/iio/dac/Kconfig | 2
-rw-r--r-- drivers/iio/dac/ad5592r-base.c | 2
-rw-r--r-- drivers/iio/gyro/st_gyro_buffer.c | 2
-rw-r--r-- drivers/iio/gyro/st_gyro_core.c | 1
-rw-r--r-- drivers/iio/humidity/am2315.c | 4
-rw-r--r-- drivers/iio/humidity/hdc100x.c | 20
-rw-r--r-- drivers/iio/imu/bmi160/bmi160_core.c | 16
-rw-r--r-- drivers/iio/industrialio-trigger.c | 23
-rw-r--r-- drivers/iio/light/apds9960.c | 1
-rw-r--r-- drivers/iio/light/bh1780.c | 10
-rw-r--r-- drivers/iio/light/max44000.c | 1
-rw-r--r-- drivers/iio/magnetometer/st_magn_buffer.c | 2
-rw-r--r-- drivers/iio/magnetometer/st_magn_core.c | 1
-rw-r--r-- drivers/iio/pressure/bmp280.c | 4
-rw-r--r-- drivers/iio/pressure/st_pressure_buffer.c | 2
-rw-r--r-- drivers/iio/pressure/st_pressure_core.c | 81
-rw-r--r-- drivers/iio/proximity/as3935.c | 17
-rw-r--r-- drivers/infiniband/core/cache.c | 14
-rw-r--r-- drivers/infiniband/core/cm.c | 4
-rw-r--r-- drivers/infiniband/core/cma.c | 62
-rw-r--r-- drivers/infiniband/core/device.c | 6
-rw-r--r-- drivers/infiniband/core/iwpm_msg.c | 2
-rw-r--r-- drivers/infiniband/core/mad.c | 6
-rw-r--r-- drivers/infiniband/core/sysfs.c | 24
-rw-r--r-- drivers/infiniband/core/uverbs_cmd.c | 2
-rw-r--r-- drivers/infiniband/core/verbs.c | 16
-rw-r--r-- drivers/infiniband/hw/hfi1/affinity.c | 31
-rw-r--r-- drivers/infiniband/hw/hfi1/chip.c | 34
-rw-r--r-- drivers/infiniband/hw/hfi1/file_ops.c | 3
-rw-r--r-- drivers/infiniband/hw/hfi1/init.c | 4
-rw-r--r-- drivers/infiniband/hw/hfi1/mad.c | 19
-rw-r--r-- drivers/infiniband/hw/hfi1/mad.h | 2
-rw-r--r-- drivers/infiniband/hw/hfi1/pio.c | 26
-rw-r--r-- drivers/infiniband/hw/hfi1/qsfp.c | 3
-rw-r--r-- drivers/infiniband/hw/hfi1/trace.c | 13
-rw-r--r-- drivers/infiniband/hw/hfi1/user_sdma.c | 6
-rw-r--r-- drivers/infiniband/hw/hfi1/verbs_txreq.c | 4
-rw-r--r-- drivers/infiniband/hw/hfi1/verbs_txreq.h | 1
-rw-r--r-- drivers/infiniband/hw/i40iw/i40iw.h | 2
-rw-r--r-- drivers/infiniband/hw/i40iw/i40iw_verbs.c | 17
-rw-r--r-- drivers/infiniband/hw/mlx4/ah.c | 2
-rw-r--r-- drivers/infiniband/hw/mlx4/mad.c | 24
-rw-r--r-- drivers/infiniband/hw/mlx4/main.c | 7
-rw-r--r-- drivers/infiniband/hw/mlx4/mlx4_ib.h | 2
-rw-r--r-- drivers/infiniband/hw/mlx4/mr.c | 34
-rw-r--r-- drivers/infiniband/hw/mlx4/qp.c | 6
-rw-r--r-- drivers/infiniband/hw/mlx5/cq.c | 12
-rw-r--r-- drivers/infiniband/hw/mlx5/mad.c | 2
-rw-r--r-- drivers/infiniband/hw/mlx5/main.c | 22
-rw-r--r-- drivers/infiniband/hw/mlx5/qp.c | 48
-rw-r--r-- drivers/infiniband/hw/qib/qib_file_ops.c | 5
-rw-r--r-- drivers/infiniband/hw/usnic/usnic_uiom.c | 5
-rw-r--r-- drivers/infiniband/sw/rdmavt/qp.c | 20
-rw-r--r-- drivers/infiniband/sw/rdmavt/vt.c | 4
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib.h | 1
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_cm.c | 4
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_ib.c | 8
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_main.c | 15
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 6
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_vlan.c | 6
-rw-r--r-- drivers/infiniband/ulp/srp/ib_srp.c | 7
-rw-r--r-- drivers/infiniband/ulp/srpt/ib_srpt.c | 3
-rw-r--r-- drivers/infiniband/ulp/srpt/ib_srpt.h | 1
-rw-r--r-- drivers/iommu/arm-smmu-v3.c | 1
-rw-r--r-- drivers/iommu/intel-iommu.c | 17
-rw-r--r-- drivers/iommu/rockchip-iommu.c | 2
-rw-r--r-- drivers/irqchip/irq-gic-v3-its.c | 49
-rw-r--r-- drivers/irqchip/irq-gic-v3.c | 2
-rw-r--r-- drivers/irqchip/irq-mips-gic.c | 12
-rw-r--r-- drivers/irqchip/irq-pic32-evic.c | 2
-rw-r--r-- drivers/leds/led-core.c | 9
-rw-r--r-- drivers/leds/trigger/ledtrig-heartbeat.c | 31
-rw-r--r-- drivers/mcb/mcb-core.c | 17
-rw-r--r-- drivers/media/platform/omap/omap_vout.c | 2
-rw-r--r-- drivers/media/platform/omap/omap_voutdef.h | 2
-rw-r--r-- drivers/media/platform/omap/omap_voutlib.c | 2
-rw-r--r-- drivers/media/usb/uvc/uvc_v4l2.c | 97
-rw-r--r-- drivers/media/v4l2-core/v4l2-mc.c | 2
-rw-r--r-- drivers/memory/omap-gpmc.c | 2
-rw-r--r-- drivers/misc/mei/client.c | 2
-rw-r--r-- drivers/mmc/core/mmc.c | 4
-rw-r--r-- drivers/mmc/host/sunxi-mmc.c | 9
-rw-r--r-- drivers/mtd/ubi/build.c | 11
-rw-r--r-- drivers/mtd/ubi/eba.c | 22
-rw-r--r-- drivers/mtd/ubi/kapi.c | 8
-rw-r--r-- drivers/net/ethernet/arc/emac_mdio.c | 2
-rw-r--r-- drivers/net/ethernet/atheros/alx/alx.h | 4
-rw-r--r-- drivers/net/ethernet/atheros/alx/main.c | 48
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 168
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 46
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 1
-rw-r--r-- drivers/net/ethernet/ethoc.c | 7
-rw-r--r-- drivers/net/ethernet/ezchip/nps_enet.c | 4
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 16
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.c | 3
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 11
-rw-r--r-- drivers/net/ethernet/marvell/mvneta_bm.c | 1
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 40
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 17
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_port.c | 18
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/pd.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 69
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 40
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/qp.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/vport.c | 38
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 203
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 5
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 5
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dev.c | 52
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_main.c | 63
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sriov.h | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 5
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_main.c | 28
-rw-r--r-- drivers/net/ethernet/qlogic/qlge/qlge_main.c | 3
-rw-r--r-- drivers/net/ethernet/sfc/ef10.c | 16
-rw-r--r-- drivers/net/ethernet/sfc/efx.c | 32
-rw-r--r-- drivers/net/ethernet/sfc/mcdi_port.c | 7
-rw-r--r-- drivers/net/ethernet/sfc/net_driver.h | 12
-rw-r--r-- drivers/net/ethernet/sfc/rx.c | 102
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 6
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 2
-rw-r--r-- drivers/net/ethernet/ti/cpsw.c | 2
-rw-r--r-- drivers/net/team/team.c | 9
-rw-r--r-- drivers/net/usb/pegasus.c | 2
-rw-r--r-- drivers/net/usb/smsc95xx.c | 51
-rw-r--r-- drivers/net/virtio_net.c | 18
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_drv.c | 2
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_int.h | 4
-rw-r--r-- drivers/net/vxlan.c | 3
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 16
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c | 2
-rw-r--r-- drivers/net/wireless/mac80211_hwsim.c | 1
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/core.c | 6
-rw-r--r-- drivers/nvme/host/pci.c | 9
-rw-r--r-- drivers/of/fdt.c | 15
-rw-r--r-- drivers/of/irq.c | 19
-rw-r--r-- drivers/of/of_reserved_mem.c | 11
-rw-r--r-- drivers/pci/vc.c | 4
-rw-r--r-- drivers/perf/arm_pmu.c | 14
-rw-r--r-- drivers/phy/phy-exynos-mipi-video.c | 6
-rw-r--r-- drivers/phy/phy-ti-pipe3.c | 15
-rw-r--r-- drivers/phy/phy-twl4030-usb.c | 14
-rw-r--r-- drivers/pinctrl/mediatek/pinctrl-mtk-common.c | 5
-rw-r--r-- drivers/pinctrl/nomadik/pinctrl-nomadik.c | 2
-rw-r--r-- drivers/platform/x86/Kconfig | 10
-rw-r--r-- drivers/platform/x86/ideapad-laptop.c | 2
-rw-r--r-- drivers/platform/x86/thinkpad_acpi.c | 87
-rw-r--r-- drivers/ptp/ptp_chardev.c | 12
-rw-r--r-- drivers/pwm/core.c | 3
-rw-r--r-- drivers/pwm/pwm-atmel-hlcdc.c | 2
-rw-r--r-- drivers/pwm/sysfs.c | 2
-rw-r--r-- drivers/regulator/qcom_smd-regulator.c | 15
-rw-r--r-- drivers/regulator/tps51632-regulator.c | 9
-rw-r--r-- drivers/scsi/53c700.c | 4
-rw-r--r-- drivers/scsi/aacraid/aacraid.h | 5
-rw-r--r-- drivers/scsi/aacraid/linit.c | 11
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_scsih.c | 3
-rw-r--r-- drivers/scsi/scsi_devinfo.c | 1
-rw-r--r-- drivers/scsi/scsi_error.c | 4
-rw-r--r-- drivers/scsi/scsi_lib.c | 7
-rw-r--r-- drivers/scsi/sd.c | 17
-rw-r--r-- drivers/scsi/sd.h | 5
-rw-r--r-- drivers/staging/android/sync.h | 3
-rw-r--r-- drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c | 7
-rw-r--r-- drivers/staging/rtl8188eu/core/rtw_efuse.c | 2
-rw-r--r-- drivers/staging/rtl8188eu/hal/usb_halinit.c | 3
-rw-r--r-- drivers/thermal/cpu_cooling.c | 16
-rw-r--r-- drivers/thermal/int340x_thermal/int3406_thermal.c | 2
-rw-r--r-- drivers/tty/Kconfig | 11
-rw-r--r-- drivers/tty/pty.c | 15
-rw-r--r-- drivers/usb/core/quirks.c | 23
-rw-r--r-- drivers/usb/dwc2/core.h | 27
-rw-r--r-- drivers/usb/dwc2/gadget.c | 24
-rw-r--r-- drivers/usb/dwc3/core.h | 1
-rw-r--r-- drivers/usb/dwc3/dwc3-exynos.c | 19
-rw-r--r-- drivers/usb/dwc3/dwc3-st.c | 10
-rw-r--r-- drivers/usb/dwc3/gadget.c | 30
-rw-r--r-- drivers/usb/gadget/composite.c | 21
-rw-r--r-- drivers/usb/gadget/configfs.c | 1
-rw-r--r-- drivers/usb/gadget/function/f_fs.c | 30
-rw-r--r-- drivers/usb/gadget/function/f_printer.c | 8
-rw-r--r-- drivers/usb/gadget/function/f_tcm.c | 20
-rw-r--r-- drivers/usb/gadget/function/f_uac2.c | 13
-rw-r--r-- drivers/usb/gadget/function/storage_common.c | 4
-rw-r--r-- drivers/usb/gadget/legacy/inode.c | 17
-rw-r--r-- drivers/usb/gadget/udc/udc-core.c | 12
-rw-r--r-- drivers/usb/host/ehci-hcd.c | 9
-rw-r--r-- drivers/usb/host/ehci-hub.c | 14
-rw-r--r-- drivers/usb/host/ehci-msm.c | 14
-rw-r--r-- drivers/usb/host/ehci-tegra.c | 16
-rw-r--r-- drivers/usb/host/ohci-q.c | 3
-rw-r--r-- drivers/usb/host/xhci-pci.c | 5
-rw-r--r-- drivers/usb/host/xhci-plat.c | 3
-rw-r--r-- drivers/usb/host/xhci-ring.c | 30
-rw-r--r-- drivers/usb/host/xhci.c | 29
-rw-r--r-- drivers/usb/musb/musb_core.c | 85
-rw-r--r-- drivers/usb/musb/musb_core.h | 3
-rw-r--r-- drivers/usb/musb/musb_gadget.c | 34
-rw-r--r-- drivers/usb/musb/musb_host.c | 68
-rw-r--r-- drivers/usb/musb/omap2430.c | 257
-rw-r--r-- drivers/usb/musb/sunxi.c | 54
-rw-r--r-- drivers/usb/phy/phy-twl6030-usb.c | 29
-rw-r--r-- drivers/usb/serial/mos7720.c | 1
-rw-r--r-- drivers/usb/storage/uas.c | 2
-rw-r--r-- drivers/usb/usbip/vhci_hcd.c | 2
-rw-r--r-- drivers/vfio/pci/vfio_pci_config.c | 3
-rw-r--r-- drivers/vfio/pci/vfio_pci_intrs.c | 6
-rw-r--r-- drivers/vfio/vfio_iommu_type1.c | 2
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c | 10
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c | 60
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c | 44
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c | 2
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c | 46
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c | 3
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c | 2
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c | 54
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c | 58
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c | 47
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c | 83
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c | 2
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c | 45
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c | 46
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/apply.c | 2
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/core.c | 4
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c | 2
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/dispc.c | 2
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/dispc_coefs.c | 2
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c | 2
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/display.c | 2
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/dpi.c | 2
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/dsi.c | 2
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/dss-of.c | 2
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/dss.c | 2
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/dss.h | 11
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/dss_features.c | 2
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/hdmi.h | 3
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c | 2
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c | 2
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c | 6
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c | 2
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c | 2
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c | 2
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c | 2
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c | 2
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/manager.c | 3
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/output.c | 2
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c | 2
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/overlay.c | 2
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/pll.c | 2
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/rfbi.c | 2
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/sdi.c | 2
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/venc.c | 2
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/video-pll.c | 2
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c | 2
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/omapfb-main.c | 2
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c | 2
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/omapfb.h | 2
-rw-r--r-- drivers/watchdog/Kconfig | 2
-rw-r--r-- drivers/xen/balloon.c | 28
-rw-r--r-- drivers/xen/xen-pciback/conf_space.c | 6
-rw-r--r-- drivers/xen/xen-pciback/conf_space_header.c | 18
-rw-r--r-- fs/autofs4/autofs_i.h | 8
-rw-r--r-- fs/autofs4/expire.c | 27
-rw-r--r-- fs/autofs4/root.c | 2
-rw-r--r-- fs/autofs4/waitq.c | 7
-rw-r--r-- fs/binfmt_elf.c | 2
-rw-r--r-- fs/binfmt_elf_fdpic.c | 2
-rw-r--r-- fs/btrfs/check-integrity.c | 2
-rw-r--r-- fs/btrfs/ctree.c | 19
-rw-r--r-- fs/btrfs/ctree.h | 2
-rw-r--r-- fs/btrfs/delayed-inode.c | 27
-rw-r--r-- fs/btrfs/delayed-inode.h | 10
-rw-r--r-- fs/btrfs/disk-io.c | 54
-rw-r--r-- fs/btrfs/disk-io.h | 2
-rw-r--r-- fs/btrfs/extent-tree.c | 33
-rw-r--r-- fs/btrfs/extent_io.c | 42
-rw-r--r-- fs/btrfs/extent_io.h | 4
-rw-r--r-- fs/btrfs/file.c | 44
-rw-r--r-- fs/btrfs/free-space-cache.c | 18
-rw-r--r-- fs/btrfs/hash.c | 5
-rw-r--r-- fs/btrfs/hash.h | 1
-rw-r--r-- fs/btrfs/inode.c | 35
-rw-r--r-- fs/btrfs/ordered-data.c | 9
-rw-r--r-- fs/btrfs/ordered-data.h | 2
-rw-r--r-- fs/btrfs/reada.c | 2
-rw-r--r-- fs/btrfs/scrub.c | 50
-rw-r--r-- fs/btrfs/super.c | 61
-rw-r--r-- fs/btrfs/tests/btrfs-tests.c | 8
-rw-r--r-- fs/btrfs/tests/btrfs-tests.h | 27
-rw-r--r-- fs/btrfs/tests/extent-buffer-tests.c | 13
-rw-r--r-- fs/btrfs/tests/extent-io-tests.c | 86
-rw-r--r-- fs/btrfs/tests/free-space-tests.c | 76
-rw-r--r-- fs/btrfs/tests/free-space-tree-tests.c | 30
-rw-r--r-- fs/btrfs/tests/inode-tests.c | 344
-rw-r--r-- fs/btrfs/tests/qgroup-tests.c | 111
-rw-r--r-- fs/btrfs/transaction.c | 10
-rw-r--r-- fs/btrfs/transaction.h | 2
-rw-r--r-- fs/btrfs/tree-log.c | 4
-rw-r--r-- fs/btrfs/volumes.c | 149
-rw-r--r-- fs/cachefiles/interface.c | 2
-rw-r--r-- fs/ceph/addr.c | 6
-rw-r--r-- fs/ceph/cache.c | 141
-rw-r--r-- fs/ceph/cache.h | 44
-rw-r--r-- fs/ceph/caps.c | 23
-rw-r--r-- fs/ceph/file.c | 27
-rw-r--r-- fs/ceph/super.h | 4
-rw-r--r-- fs/coredump.c | 4
-rw-r--r-- fs/dcache.c | 79
-rw-r--r-- fs/debugfs/file.c | 7
-rw-r--r-- fs/devpts/inode.c | 191
-rw-r--r-- fs/ecryptfs/kthread.c | 13
-rw-r--r-- fs/fscache/page.c | 2
-rw-r--r-- fs/internal.h | 1
-rw-r--r-- fs/jbd2/journal.c | 32
-rw-r--r-- fs/libfs.c | 4
-rw-r--r-- fs/namei.c | 110
-rw-r--r-- fs/namespace.c | 10
-rw-r--r-- fs/nfsd/blocklayout.c | 2
-rw-r--r-- fs/nfsd/nfs2acl.c | 20
-rw-r--r-- fs/nfsd/nfs3acl.c | 16
-rw-r--r-- fs/nfsd/nfs4acl.c | 16
-rw-r--r-- fs/nfsd/nfs4callback.c | 18
-rw-r--r-- fs/nfsd/nfs4state.c | 67
-rw-r--r-- fs/nfsd/state.h | 2
-rw-r--r-- fs/nilfs2/the_nilfs.c | 2
-rw-r--r-- fs/ocfs2/Makefile | 2
-rw-r--r-- fs/ocfs2/buffer_head_io.c | 5
-rw-r--r-- fs/overlayfs/dir.c | 13
-rw-r--r-- fs/overlayfs/inode.c | 26
-rw-r--r-- fs/posix_acl.c | 42
-rw-r--r-- fs/proc/root.c | 7
-rw-r--r-- fs/reiserfs/super.c | 9
-rw-r--r-- fs/ubifs/file.c | 24
-rw-r--r-- fs/udf/partition.c | 13
-rw-r--r-- fs/udf/super.c | 22
-rw-r--r-- fs/udf/udf_sb.h | 5
-rw-r--r-- include/acpi/video.h | 6
-rw-r--r-- include/asm-generic/qspinlock.h | 53
-rw-r--r-- include/drm/drmP.h | 94
-rw-r--r-- include/drm/drm_atomic.h | 82
-rw-r--r-- include/drm/drm_atomic_helper.h | 42
-rw-r--r-- include/drm/drm_auth.h | 59
-rw-r--r-- include/drm/drm_crtc.h | 625
-rw-r--r-- include/drm/drm_crtc_helper.h | 3
-rw-r--r-- include/drm/drm_dp_helper.h | 2
-rw-r--r-- include/drm/drm_fb_cma_helper.h | 1
-rw-r--r-- include/drm/drm_fb_helper.h | 11
-rw-r--r-- include/drm/drm_fourcc.h | 37
-rw-r--r-- include/drm/drm_legacy.h | 2
-rw-r--r-- include/drm/drm_mipi_dsi.h | 3
-rw-r--r-- include/drm/drm_modes.h | 2
-rw-r--r-- include/drm/drm_modeset_helper_vtables.h | 49
-rw-r--r-- include/drm/drm_plane_helper.h | 1
-rw-r--r-- include/drm/drm_simple_kms_helper.h | 94
-rw-r--r-- include/drm/i915_drm.h | 3
-rw-r--r-- include/drm/intel-gtt.h | 3
-rw-r--r-- include/linux/binfmts.h | 1
-rw-r--r-- include/linux/ceph/osd_client.h | 5
-rw-r--r-- include/linux/ceph/osdmap.h | 5
-rw-r--r-- include/linux/clk-provider.h | 2
-rw-r--r-- include/linux/cpuidle.h | 3
-rw-r--r-- include/linux/dcache.h | 13
-rw-r--r-- include/linux/devpts_fs.h | 9
-rw-r--r-- include/linux/dma-buf.h | 15
-rw-r--r-- include/linux/efi.h | 2
-rw-r--r-- include/linux/fence-array.h | 73
-rw-r--r-- include/linux/fence.h | 15
-rw-r--r-- include/linux/fscache-cache.h | 2
-rw-r--r-- include/linux/iio/common/st_sensors.h | 9
-rw-r--r-- include/linux/init_task.h | 2
-rw-r--r-- include/linux/io-mapping.h | 10
-rw-r--r-- include/linux/irqchip/arm-gic-v3.h | 6
-rw-r--r-- include/linux/isa.h | 5
-rw-r--r-- include/linux/jump_label.h | 16
-rw-r--r-- include/linux/kasan.h | 11
-rw-r--r-- include/linux/leds.h | 23
-rw-r--r-- include/linux/mlx5/device.h | 8
-rw-r--r-- include/linux/mlx5/mlx5_ifc.h | 12
-rw-r--r-- include/linux/mlx5/qp.h | 7
-rw-r--r-- include/linux/mlx5/vport.h | 2
-rw-r--r-- include/linux/mm.h | 2
-rw-r--r-- include/linux/namei.h | 2
-rw-r--r-- include/linux/of.h | 2
-rw-r--r-- include/linux/of_pci.h | 2
-rw-r--r-- include/linux/of_reserved_mem.h | 7
-rw-r--r-- include/linux/page_idle.h | 43
-rw-r--r-- include/linux/platform_data/omapdss.h | 37
-rw-r--r-- include/linux/pwm.h | 3
-rw-r--r-- include/linux/reservation.h | 53
-rw-r--r-- include/linux/sched.h | 2
-rw-r--r-- include/linux/sctp.h | 2
-rw-r--r-- include/linux/seqlock.h | 7
-rw-r--r-- include/linux/sunrpc/clnt.h | 2
-rw-r--r-- include/linux/sunrpc/svc_xprt.h | 1
-rw-r--r-- include/linux/sunrpc/xprt.h | 1
-rw-r--r-- include/linux/thermal.h | 2
-rw-r--r-- include/linux/timekeeping.h | 3
-rw-r--r-- include/linux/usb/gadget.h | 3
-rw-r--r-- include/linux/usb/musb.h | 5
-rw-r--r-- include/linux/vga_switcheroo.h | 2
-rw-r--r-- include/media/v4l2-mc.h | 2
-rw-r--r-- include/net/compat.h | 1
-rw-r--r-- include/net/ip6_tunnel.h | 3
-rw-r--r-- include/net/ip_vs.h | 2
-rw-r--r-- include/net/netfilter/nf_queue.h | 4
-rw-r--r-- include/net/netns/netfilter.h | 2
-rw-r--r-- include/net/pkt_cls.h | 10
-rw-r--r-- include/net/pkt_sched.h | 1
-rw-r--r-- include/net/sch_generic.h | 6
-rw-r--r-- include/rdma/ib_verbs.h | 6
-rw-r--r-- include/rdma/rdma_vt.h | 4
-rw-r--r-- include/sound/omap-hdmi-audio.h | 9
-rw-r--r-- include/uapi/linux/btrfs.h | 2
-rw-r--r-- include/uapi/linux/ethtool.h | 11
-rw-r--r-- include/uapi/linux/gtp.h | 2
-rw-r--r-- include/uapi/linux/pkt_cls.h | 4
-rw-r--r-- include/uapi/sound/Kbuild | 3
-rw-r--r-- include/video/omap-panel-data.h | 157
-rw-r--r-- include/video/omapfb_dss.h (renamed from include/video/omapdss.h) | 80
-rw-r--r-- init/main.c | 8
-rw-r--r-- kernel/bpf/inode.c | 1
-rw-r--r-- kernel/events/core.c | 6
-rw-r--r-- kernel/fork.c | 50
-rw-r--r-- kernel/futex.c | 14
-rw-r--r-- kernel/irq/ipi.c | 2
-rw-r--r-- kernel/jump_label.c | 36
-rw-r--r-- kernel/kcov.c | 7
-rw-r--r-- kernel/locking/mutex-debug.c | 12
-rw-r--r-- kernel/locking/mutex-debug.h | 4
-rw-r--r-- kernel/locking/mutex.c | 15
-rw-r--r-- kernel/locking/mutex.h | 2
-rw-r--r-- kernel/locking/qspinlock.c | 60
-rw-r--r-- kernel/power/process.c | 12
-rw-r--r-- kernel/relay.c | 1
-rw-r--r-- kernel/sched/core.c | 42
-rw-r--r-- kernel/sched/debug.c | 15
-rw-r--r-- kernel/sched/fair.c | 72
-rw-r--r-- kernel/sched/idle.c | 2
-rw-r--r-- kernel/sched/sched.h | 2
-rw-r--r-- kernel/sched/stats.h | 3
-rw-r--r-- kernel/time/hrtimer.c | 1
-rw-r--r-- kernel/trace/bpf_trace.c | 4
-rw-r--r-- kernel/trace/trace_printk.c | 7
-rw-r--r-- lib/Kconfig.debug | 3
-rw-r--r-- lib/Makefile | 1
-rw-r--r-- lib/test_uuid.c | 133
-rw-r--r-- lib/uuid.c | 4
-rw-r--r-- mm/compaction.c | 39
-rw-r--r-- mm/fadvise.c | 11
-rw-r--r-- mm/filemap.c | 2
-rw-r--r-- mm/hugetlb.c | 46
-rw-r--r-- mm/internal.h | 3
-rw-r--r-- mm/kasan/kasan.c | 10
-rw-r--r-- mm/kmemleak.c | 2
-rw-r--r-- mm/memcontrol.c | 12
-rw-r--r-- mm/memory.c | 31
-rw-r--r-- mm/mempool.c | 12
-rw-r--r-- mm/migrate.c | 2
-rw-r--r-- mm/oom_kill.c | 14
-rw-r--r-- mm/page-writeback.c | 21
-rw-r--r-- mm/page_alloc.c | 39
-rw-r--r-- mm/page_owner.c | 32
-rw-r--r-- mm/page_poison.c | 8
-rw-r--r-- mm/percpu.c | 73
-rw-r--r-- mm/shmem.c | 2
-rw-r--r-- mm/swap.c | 31
-rw-r--r-- mm/swap_state.c | 5
-rw-r--r-- mm/vmalloc.c | 9
-rw-r--r-- mm/vmstat.c | 2
-rw-r--r-- mm/z3fold.c | 24
-rw-r--r-- net/8021q/vlan.c | 5
-rw-r--r-- net/8021q/vlan.h | 2
-rw-r--r-- net/8021q/vlan_dev.c | 20
-rw-r--r-- net/atm/signaling.c | 2
-rw-r--r-- net/atm/svc.c | 4
-rw-r--r-- net/bridge/br_fdb.c | 2
-rw-r--r-- net/ceph/osd_client.c | 51
-rw-r--r-- net/ceph/osdmap.c | 4
-rw-r--r-- net/compat.c | 20
-rw-r--r-- net/core/gen_stats.c | 2
-rw-r--r-- net/core/hwbm.c | 3
-rw-r--r-- net/core/net-sysfs.c | 1
-rw-r--r-- net/core/pktgen.c | 8
-rw-r--r-- net/ieee802154/nl802154.c | 4
-rw-r--r-- net/ipv4/af_inet.c | 8
-rw-r--r-- net/ipv4/sysctl_net_ipv4.c | 4
-rw-r--r-- net/ipv4/udp.c | 10
-rw-r--r-- net/ipv6/Kconfig | 9
-rw-r--r-- net/ipv6/Makefile | 2
-rw-r--r-- net/ipv6/fou6.c | 2
-rw-r--r-- net/ipv6/ip6_gre.c | 6
-rw-r--r-- net/ipv6/ip6_output.c | 11
-rw-r--r-- net/ipv6/netfilter/nf_dup_ipv6.c | 1
-rw-r--r-- net/ipv6/tcp_ipv6.c | 4
-rw-r--r-- net/ipv6/udp.c | 12
-rw-r--r-- net/l2tp/l2tp_core.c | 2
-rw-r--r-- net/l2tp/l2tp_ip6.c | 12
-rw-r--r-- net/lapb/lapb_in.c | 5
-rw-r--r-- net/lapb/lapb_out.c | 4
-rw-r--r-- net/lapb/lapb_subr.c | 14
-rw-r--r-- net/mac80211/mesh.c | 4
-rw-r--r-- net/mac80211/sta_info.h | 2
-rw-r--r-- net/netfilter/ipvs/ip_vs_conn.c | 5
-rw-r--r-- net/netfilter/ipvs/ip_vs_core.c | 5
-rw-r--r-- net/netfilter/nf_conntrack_ftp.c | 1
-rw-r--r-- net/netfilter/nf_conntrack_helper.c | 9
-rw-r--r-- net/netfilter/nf_conntrack_irc.c | 1
-rw-r--r-- net/netfilter/nf_conntrack_sane.c | 1
-rw-r--r-- net/netfilter/nf_conntrack_sip.c | 1
-rw-r--r-- net/netfilter/nf_conntrack_standalone.c | 2
-rw-r--r-- net/netfilter/nf_conntrack_tftp.c | 1
-rw-r--r-- net/netfilter/nf_queue.c | 17
-rw-r--r-- net/netfilter/nf_tables_api.c | 2
-rw-r--r-- net/netfilter/nfnetlink_queue.c | 20
-rw-r--r-- net/netfilter/x_tables.c | 4
-rw-r--r-- net/openvswitch/actions.c | 20
-rw-r--r-- net/packet/af_packet.c | 25
-rw-r--r-- net/rds/rds.h | 2
-rw-r--r-- net/rds/recv.c | 2
-rw-r--r--net/rds/send.c1
-rw-r--r--net/rds/tcp.c78
-rw-r--r--net/rds/tcp.h1
-rw-r--r--net/rds/tcp_connect.c2
-rw-r--r--net/rds/tcp_listen.c20
-rw-r--r--net/rds/threads.c10
-rw-r--r--net/rxrpc/rxkad.c4
-rw-r--r--net/sched/act_police.c40
-rw-r--r--net/sched/cls_flower.c6
-rw-r--r--net/sched/cls_u32.c72
-rw-r--r--net/sched/sch_api.c4
-rw-r--r--net/sched/sch_drr.c4
-rw-r--r--net/sched/sch_fq_codel.c26
-rw-r--r--net/sched/sch_generic.c2
-rw-r--r--net/sched/sch_hfsc.c12
-rw-r--r--net/sched/sch_htb.c13
-rw-r--r--net/sched/sch_ingress.c12
-rw-r--r--net/sched/sch_prio.c4
-rw-r--r--net/sched/sch_qfq.c6
-rw-r--r--net/sched/sch_red.c4
-rw-r--r--net/sched/sch_tbf.c4
-rw-r--r--net/sctp/sctp_diag.c3
-rw-r--r--net/sctp/socket.c1
-rw-r--r--net/sunrpc/clnt.c31
-rw-r--r--net/sunrpc/svc_xprt.c2
-rw-r--r--net/sunrpc/xprtsock.c1
-rw-r--r--net/tipc/netlink_compat.c114
-rw-r--r--net/unix/af_unix.c6
-rw-r--r--net/wireless/core.c2
-rw-r--r--net/wireless/wext-core.c25
-rwxr-xr-xscripts/checkpatch.pl1
-rwxr-xr-xscripts/kernel-doc393
-rw-r--r--scripts/mod/file2alias.c2
-rw-r--r--security/keys/compat.c2
-rw-r--r--security/keys/dh.c8
-rw-r--r--security/keys/internal.h5
-rw-r--r--security/keys/key.c2
-rw-r--r--security/keys/keyctl.c4
-rw-r--r--sound/drivers/dummy.c1
-rw-r--r--sound/hda/hdac_regmap.c4
-rw-r--r--sound/pci/hda/hda_intel.c11
-rw-r--r--sound/pci/hda/hda_tegra.c20
-rw-r--r--sound/pci/hda/patch_realtek.c79
-rw-r--r--sound/soc/omap/omap-hdmi-audio.c1
-rw-r--r--tools/perf/util/data-convert-bt.c41
-rw-r--r--tools/perf/util/event.c2
-rw-r--r--tools/perf/util/symbol.c16
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc9
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc9
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc9
-rw-r--r--tools/testing/selftests/net/reuseport_bpf.c10
-rw-r--r--tools/testing/selftests/vm/compaction_test.c2
-rw-r--r--tools/virtio/ringtest/Makefile4
-rw-r--r--tools/virtio/ringtest/README4
-rw-r--r--tools/virtio/ringtest/noring.c69
-rwxr-xr-xtools/virtio/ringtest/run-on-all.sh4
-rw-r--r--tools/vm/slabinfo.c2
-rw-r--r--virt/kvm/arm/hyp/vgic-v2-sr.c7
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio.c4
-rw-r--r--virt/kvm/arm/vgic/vgic-v2.c14
-rw-r--r--virt/kvm/arm/vgic/vgic-v3.c14
-rw-r--r--virt/kvm/irqchip.c2
-rw-r--r--virt/kvm/kvm_main.c24
1414 files changed, 34606 insertions, 19992 deletions
diff --git a/.mailmap b/.mailmap
index 08b80428f583..52489f564069 100644
--- a/.mailmap
+++ b/.mailmap
@@ -21,6 +21,7 @@ Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
 Andrew Morton <akpm@linux-foundation.org>
 Andrew Vasquez <andrew.vasquez@qlogic.com>
 Andy Adamson <andros@citi.umich.edu>
+Antoine Tenart <antoine.tenart@free-electrons.com>
 Antonio Ospite <ao2@ao2.it> <ao2@amarulasolutions.com>
 Archit Taneja <archit@ti.com>
 Arnaud Patard <arnaud.patard@rtp-net.org>
@@ -30,6 +31,9 @@ Axel Lin <axel.lin@gmail.com>
 Ben Gardner <bgardner@wabtec.com>
 Ben M Cahill <ben.m.cahill@intel.com>
 Björn Steinbrink <B.Steinbrink@gmx.de>
+Boris Brezillon <boris.brezillon@free-electrons.com>
+Boris Brezillon <boris.brezillon@free-electrons.com> <b.brezillon.dev@gmail.com>
+Boris Brezillon <boris.brezillon@free-electrons.com> <b.brezillon@overkiz.com>
 Brian Avery <b.avery@hp.com>
 Brian King <brking@us.ibm.com>
 Christoph Hellwig <hch@lst.de>
@@ -89,6 +93,7 @@ Leonid I Ananiev <leonid.i.ananiev@intel.com>
 Linas Vepstas <linas@austin.ibm.com>
 Mark Brown <broonie@sirena.org.uk>
 Matthieu CASTET <castet.matthieu@free.fr>
+Mauro Carvalho Chehab <mchehab@kernel.org> <maurochehab@gmail.com> <mchehab@infradead.org> <mchehab@redhat.com> <m.chehab@samsung.com> <mchehab@osg.samsung.com> <mchehab@s-opensource.com>
 Mayuresh Janorkar <mayur@ti.com>
 Michael Buesch <m@bues.ch>
 Michel Dänzer <michel@tungstengraphics.com>
@@ -122,6 +127,7 @@ Santosh Shilimkar <santosh.shilimkar@oracle.org>
 Sascha Hauer <s.hauer@pengutronix.de>
 S.Çağlar Onur <caglar@pardus.org.tr>
 Shiraz Hashim <shiraz.linux.kernel@gmail.com> <shiraz.hashim@st.com>
+Shuah Khan <shuah@kernel.org> <shuahkhan@gmail.com> <shuah.khan@hp.com> <shuahkh@osg.samsung.com> <shuah.kh@samsung.com>
 Simon Kelley <simon@thekelleys.org.uk>
 Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
 Stephen Hemminger <shemminger@osdl.org>
diff --git a/CREDITS b/CREDITS
index 0f0bf22afe0c..2a3fbcd229e6 100644
--- a/CREDITS
+++ b/CREDITS
@@ -649,6 +649,7 @@ D: Configure, Menuconfig, xconfig
 
 N: Mauro Carvalho Chehab
 E: m.chehab@samsung.org
+E: mchehab@osg.samsung.com
 E: mchehab@infradead.org
 D: Media subsystem (V4L/DVB) drivers and core
 D: EDAC drivers and EDAC 3.0 core rework
diff --git a/Documentation/.gitignore b/Documentation/.gitignore
new file mode 100644
index 000000000000..53752db253e3
--- /dev/null
+++ b/Documentation/.gitignore
@@ -0,0 +1 @@
+output
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-uvc b/Documentation/ABI/testing/configfs-usb-gadget-uvc
index 2f4a0051b32d..1ba0d0fda9c0 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-uvc
+++ b/Documentation/ABI/testing/configfs-usb-gadget-uvc
@@ -1,6 +1,6 @@
 What: /config/usb-gadget/gadget/functions/uvc.name
 Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description: UVC function directory
 
 	streaming_maxburst - 0..15 (ss only)
@@ -9,37 +9,37 @@ Description: UVC function directory
 
 What: /config/usb-gadget/gadget/functions/uvc.name/control
 Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description: Control descriptors
 
 What: /config/usb-gadget/gadget/functions/uvc.name/control/class
 Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description: Class descriptors
 
 What: /config/usb-gadget/gadget/functions/uvc.name/control/class/ss
 Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description: Super speed control class descriptors
 
 What: /config/usb-gadget/gadget/functions/uvc.name/control/class/fs
 Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description: Full speed control class descriptors
 
 What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal
 Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description: Terminal descriptors
 
 What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal/output
 Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description: Output terminal descriptors
 
 What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal/output/default
 Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description: Default output terminal descriptors
 
 	All attributes read only:
@@ -53,12 +53,12 @@ Description: Default output terminal descriptors
 
 What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal/camera
 Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description: Camera terminal descriptors
 
 What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal/camera/default
 Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description: Default camera terminal descriptors
 
 	All attributes read only:
@@ -75,12 +75,12 @@ Description: Default camera terminal descriptors
 
 What: /config/usb-gadget/gadget/functions/uvc.name/control/processing
 Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description: Processing unit descriptors
 
 What: /config/usb-gadget/gadget/functions/uvc.name/control/processing/default
 Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description: Default processing unit descriptors
 
 	All attributes read only:
@@ -94,49 +94,49 @@ Description: Default processing unit descriptors
 
 What: /config/usb-gadget/gadget/functions/uvc.name/control/header
 Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description: Control header descriptors
 
 What: /config/usb-gadget/gadget/functions/uvc.name/control/header/name
 Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description: Specific control header descriptors
 
 	dwClockFrequency
 	bcdUVC
 What: /config/usb-gadget/gadget/functions/uvc.name/streaming
 Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description: Streaming descriptors
 
 What: /config/usb-gadget/gadget/functions/uvc.name/streaming/class
 Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description: Streaming class descriptors
 
 What: /config/usb-gadget/gadget/functions/uvc.name/streaming/class/ss
 Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description: Super speed streaming class descriptors
 
 What: /config/usb-gadget/gadget/functions/uvc.name/streaming/class/hs
 Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description: High speed streaming class descriptors
 
 What: /config/usb-gadget/gadget/functions/uvc.name/streaming/class/fs
 Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description: Full speed streaming class descriptors
 
 What: /config/usb-gadget/gadget/functions/uvc.name/streaming/color_matching
 Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description: Color matching descriptors
 
 What: /config/usb-gadget/gadget/functions/uvc.name/streaming/color_matching/default
 Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description: Default color matching descriptors
 
 	All attributes read only:
@@ -150,12 +150,12 @@ Description: Default color matching descriptors
 
 What: /config/usb-gadget/gadget/functions/uvc.name/streaming/mjpeg
 Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description: MJPEG format descriptors
 
 What: /config/usb-gadget/gadget/functions/uvc.name/streaming/mjpeg/name
 Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description: Specific MJPEG format descriptors
 
 	All attributes read only,
@@ -174,7 +174,7 @@ Description: Specific MJPEG format descriptors
 
 What: /config/usb-gadget/gadget/functions/uvc.name/streaming/mjpeg/name/name
 Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description: Specific MJPEG frame descriptors
 
 	dwFrameInterval - indicates how frame interval can be
@@ -196,12 +196,12 @@ Description: Specific MJPEG frame descriptors
 
 What: /config/usb-gadget/gadget/functions/uvc.name/streaming/uncompressed
 Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description: Uncompressed format descriptors
 
 What: /config/usb-gadget/gadget/functions/uvc.name/streaming/uncompressed/name
 Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description: Specific uncompressed format descriptors
 
 	bmaControls - this format's data for bmaControls in
@@ -221,7 +221,7 @@ Description: Specific uncompressed format descriptors
 
 What: /config/usb-gadget/gadget/functions/uvc.name/streaming/uncompressed/name/name
 Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description: Specific uncompressed frame descriptors
 
 	dwFrameInterval - indicates how frame interval can be
@@ -243,12 +243,12 @@ Description: Specific uncompressed frame descriptors
 
 What: /config/usb-gadget/gadget/functions/uvc.name/streaming/header
 Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description: Streaming header descriptors
 
 What: /config/usb-gadget/gadget/functions/uvc.name/streaming/header/name
 Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description: Specific streaming header descriptors
 
 	All attributes read only:
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935 b/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
index 6708c5e264aa..33e96f740639 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
+++ b/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
@@ -1,4 +1,4 @@
-What /sys/bus/iio/devices/iio:deviceX/in_proximity_raw
+What /sys/bus/iio/devices/iio:deviceX/in_proximity_input
 Date: March 2014
 KernelVersion: 3.15
 Contact: Matt Ranostay <mranostay@gmail.com>
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index d70f9b68174e..f4482f9b221f 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -14,7 +14,7 @@ DOCBOOKS := z8530book.xml device-drivers.xml \
 	    genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
 	    80211.xml debugobjects.xml sh.xml regulator.xml \
 	    alsa-driver-api.xml writing-an-alsa-driver.xml \
-	    tracepoint.xml gpu.xml media_api.xml w1.xml \
+	    tracepoint.xml media_api.xml w1.xml \
 	    writing_musb_glue_layer.xml crypto-API.xml iio.xml
 
 include Documentation/DocBook/media/Makefile
@@ -33,10 +33,6 @@ PDF_METHOD = $(prefer-db2x)
 PS_METHOD = $(prefer-db2x)
 
 
-###
-# The targets that may be used.
-PHONY += xmldocs sgmldocs psdocs pdfdocs htmldocs mandocs installmandocs cleandocs
-
 targets += $(DOCBOOKS)
 BOOKS := $(addprefix $(obj)/,$(DOCBOOKS))
 xmldocs: $(BOOKS)
@@ -63,6 +59,9 @@ installmandocs: mandocs
 	sort -k 2 -k 1 | uniq -f 1 | sed -e 's: :/:' | \
 	xargs install -m 644 -t /usr/local/man/man9/
 
+# no-op for the DocBook toolchain
+epubdocs:
+
 ###
 #External programs used
 KERNELDOCXMLREF = $(srctree)/scripts/kernel-doc-xml-ref
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index de79efdad46c..c3313d45f4d6 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -128,16 +128,48 @@ X!Edrivers/base/interface.c
 !Edrivers/base/platform.c
 !Edrivers/base/bus.c
   </sect1>
-  <sect1><title>Device Drivers DMA Management</title>
+  <sect1>
+    <title>Buffer Sharing and Synchronization</title>
+    <para>
+      The dma-buf subsystem provides the framework for sharing buffers
+      for hardware (DMA) access across multiple device drivers and
+      subsystems, and for synchronizing asynchronous hardware access.
+    </para>
+    <para>
+      This is used, for example, by drm "prime" multi-GPU support, but
+      is of course not limited to GPU use cases.
+    </para>
+    <para>
+      The three main components of this are: (1) dma-buf, representing
+      a sg_table and exposed to userspace as a file descriptor to allow
+      passing between devices, (2) fence, which provides a mechanism
+      to signal when one device has finished access, and (3) reservation,
+      which manages the shared or exclusive fence(s) associated with
+      the buffer.
+    </para>
+    <sect2><title>dma-buf</title>
 !Edrivers/dma-buf/dma-buf.c
+!Iinclude/linux/dma-buf.h
+    </sect2>
+    <sect2><title>reservation</title>
+!Pdrivers/dma-buf/reservation.c Reservation Object Overview
+!Edrivers/dma-buf/reservation.c
+!Iinclude/linux/reservation.h
+    </sect2>
+    <sect2><title>fence</title>
 !Edrivers/dma-buf/fence.c
-!Edrivers/dma-buf/seqno-fence.c
 !Iinclude/linux/fence.h
+!Edrivers/dma-buf/seqno-fence.c
 !Iinclude/linux/seqno-fence.h
+!Edrivers/dma-buf/fence-array.c
+!Iinclude/linux/fence-array.h
 !Edrivers/dma-buf/reservation.c
 !Iinclude/linux/reservation.h
 !Edrivers/dma-buf/sync_file.c
 !Iinclude/linux/sync_file.h
+    </sect2>
+  </sect1>
+  <sect1><title>Device Drivers DMA Management</title>
 !Edrivers/base/dma-coherent.c
 !Edrivers/base/dma-mapping.c
   </sect1>
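
The paragraphs added above summarize the dma-buf/fence/reservation split. As a purely
illustrative sketch of the exporter side (my_buffer, my_dmabuf_ops and my_export are
invented names, and a real dma_buf_ops must implement the attach/map/mmap/release
callbacks), an export path built on this framework might look like:

    #include <linux/dma-buf.h>
    #include <linux/scatterlist.h>

    /* Hypothetical driver-private buffer wrapping an sg_table. */
    struct my_buffer {
            struct sg_table *sgt;
            size_t size;
    };

    static const struct dma_buf_ops my_dmabuf_ops = {
            /* .map_dma_buf, .unmap_dma_buf, .release, .mmap, ... must be
             * filled in with real implementations for the device. */
    };

    static struct dma_buf *my_export(struct my_buffer *buf)
    {
            DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

            exp_info.ops = &my_dmabuf_ops;
            exp_info.size = buf->size;
            exp_info.flags = O_RDWR;
            exp_info.priv = buf;

            return dma_buf_export(&exp_info);
    }

The returned dma_buf can be handed to userspace with dma_buf_fd(); importers then pick
it up via dma_buf_get() and dma_buf_attach().
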
diff --git a/Documentation/DocBook/gpu.tmpl b/Documentation/DocBook/gpu.tmpl
deleted file mode 100644
index 7586bf75f62e..000000000000
--- a/Documentation/DocBook/gpu.tmpl
+++ /dev/null
@@ -1,3540 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
-	"http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
-
-<book id="gpuDevelopersGuide">
-  <bookinfo>
-    <title>Linux GPU Driver Developer's Guide</title>
-
-    <authorgroup>
-      <author>
-        <firstname>Jesse</firstname>
-        <surname>Barnes</surname>
-        <contrib>Initial version</contrib>
-        <affiliation>
-          <orgname>Intel Corporation</orgname>
-          <address>
-            <email>jesse.barnes@intel.com</email>
-          </address>
-        </affiliation>
-      </author>
-      <author>
-        <firstname>Laurent</firstname>
-        <surname>Pinchart</surname>
-        <contrib>Driver internals</contrib>
-        <affiliation>
-          <orgname>Ideas on board SPRL</orgname>
-          <address>
-            <email>laurent.pinchart@ideasonboard.com</email>
-          </address>
-        </affiliation>
-      </author>
-      <author>
-        <firstname>Daniel</firstname>
-        <surname>Vetter</surname>
-        <contrib>Contributions all over the place</contrib>
-        <affiliation>
-          <orgname>Intel Corporation</orgname>
-          <address>
-            <email>daniel.vetter@ffwll.ch</email>
-          </address>
-        </affiliation>
-      </author>
-      <author>
-        <firstname>Lukas</firstname>
-        <surname>Wunner</surname>
-        <contrib>vga_switcheroo documentation</contrib>
-        <affiliation>
-          <address>
-            <email>lukas@wunner.de</email>
-          </address>
-        </affiliation>
-      </author>
-    </authorgroup>
-
-    <copyright>
-      <year>2008-2009</year>
-      <year>2013-2014</year>
-      <holder>Intel Corporation</holder>
-    </copyright>
-    <copyright>
-      <year>2012</year>
-      <holder>Laurent Pinchart</holder>
-    </copyright>
-    <copyright>
-      <year>2015</year>
-      <holder>Lukas Wunner</holder>
-    </copyright>
-
-    <legalnotice>
-      <para>
-        The contents of this file may be used under the terms of the GNU
-        General Public License version 2 (the "GPL") as distributed in
-        the kernel source COPYING file.
-      </para>
-    </legalnotice>
-
-    <revhistory>
-      <!-- Put document revisions here, newest first. -->
-      <revision>
-        <revnumber>1.0</revnumber>
-        <date>2012-07-13</date>
-        <authorinitials>LP</authorinitials>
-        <revremark>Added extensive documentation about driver internals.
-        </revremark>
-      </revision>
-      <revision>
-        <revnumber>1.1</revnumber>
-        <date>2015-10-11</date>
-        <authorinitials>LW</authorinitials>
-        <revremark>Added vga_switcheroo documentation.
-        </revremark>
-      </revision>
-    </revhistory>
-  </bookinfo>
-
-<toc></toc>
-
-<part id="drmCore">
-  <title>DRM Core</title>
-  <partintro>
-    <para>
-      This first part of the GPU Driver Developer's Guide documents core DRM
-      code, helper libraries for writing drivers and generic userspace
-      interfaces exposed by DRM drivers.
-    </para>
-  </partintro>
-
-  <chapter id="drmIntroduction">
-    <title>Introduction</title>
-    <para>
-      The Linux DRM layer contains code intended to support the needs
-      of complex graphics devices, usually containing programmable
-      pipelines well suited to 3D graphics acceleration. Graphics
-      drivers in the kernel may make use of DRM functions to make
-      tasks like memory management, interrupt handling and DMA easier,
-      and provide a uniform interface to applications.
-    </para>
-    <para>
-      A note on versions: this guide covers features found in the DRM
-      tree, including the TTM memory manager, output configuration and
-      mode setting, and the new vblank internals, in addition to all
-      the regular features found in current kernels.
-    </para>
-    <para>
-      [Insert diagram of typical DRM stack here]
-    </para>
-    <sect1>
-      <title>Style Guidelines</title>
-      <para>
-        For consistency this documentation uses American English. Abbreviations
-        are written as all-uppercase, for example: DRM, KMS, IOCTL, CRTC, and so
-        on. To aid in reading, documentations make full use of the markup
-        characters kerneldoc provides: @parameter for function parameters, @member
-        for structure members, &amp;structure to reference structures and
-        function() for functions. These all get automatically hyperlinked if
-        kerneldoc for the referenced objects exists. When referencing entries in
-        function vtables please use -&gt;vfunc(). Note that kerneldoc does
-        not support referencing struct members directly, so please add a reference
-        to the vtable struct somewhere in the same paragraph or at least section.
-      </para>
-      <para>
-        Except in special situations (to separate locked from unlocked variants)
-        locking requirements for functions aren't documented in the kerneldoc.
-        Instead locking should be check at runtime using e.g.
-        <code>WARN_ON(!mutex_is_locked(...));</code>. Since it's much easier to
-        ignore documentation than runtime noise this provides more value. And on
-        top of that runtime checks do need to be updated when the locking rules
-        change, increasing the chances that they're correct. Within the
-        documentation the locking rules should be explained in the relevant
-        structures: Either in the comment for the lock explaining what it
-        protects, or data fields need a note about which lock protects them, or
-        both.
-      </para>
-      <para>
-        Functions which have a non-<code>void</code> return value should have a
-        section called "Returns" explaining the expected return values in
-        different cases and their meanings. Currently there's no consensus whether
-        that section name should be all upper-case or not, and whether it should
-        end in a colon or not. Go with the file-local style. Other common section
-        names are "Notes" with information for dangerous or tricky corner cases,
-        and "FIXME" where the interface could be cleaned up.
-      </para>
-    </sect1>
-  </chapter>
-
-  <!-- Internals -->
-
-  <chapter id="drmInternals">
-    <title>DRM Internals</title>
-    <para>
-      This chapter documents DRM internals relevant to driver authors
-      and developers working to add support for the latest features to
-      existing drivers.
-    </para>
-    <para>
-      First, we go over some typical driver initialization
-      requirements, like setting up command buffers, creating an
-      initial output configuration, and initializing core services.
-      Subsequent sections cover core internals in more detail,
-      providing implementation notes and examples.
-    </para>
-    <para>
-      The DRM layer provides several services to graphics drivers,
-      many of them driven by the application interfaces it provides
-      through libdrm, the library that wraps most of the DRM ioctls.
-      These include vblank event handling, memory
-      management, output management, framebuffer management, command
-      submission &amp; fencing, suspend/resume support, and DMA
-      services.
-    </para>
-
-    <!-- Internals: driver init -->
-
-    <sect1>
-      <title>Driver Initialization</title>
-      <para>
-        At the core of every DRM driver is a <structname>drm_driver</structname>
-        structure. Drivers typically statically initialize a drm_driver structure,
-        and then pass it to <function>drm_dev_alloc()</function> to allocate a
-        device instance. After the device instance is fully initialized it can be
-        registered (which makes it accessible from userspace) using
-        <function>drm_dev_register()</function>.
-      </para>
-      <para>
-        The <structname>drm_driver</structname> structure contains static
-        information that describes the driver and features it supports, and
-        pointers to methods that the DRM core will call to implement the DRM API.
-        We will first go through the <structname>drm_driver</structname> static
-        information fields, and will then describe individual operations in
-        details as they get used in later sections.
-      </para>
-      <sect2>
-        <title>Driver Information</title>
-        <sect3>
-          <title>Driver Features</title>
-          <para>
-            Drivers inform the DRM core about their requirements and supported
-            features by setting appropriate flags in the
-            <structfield>driver_features</structfield> field. Since those flags
-            influence the DRM core behaviour since registration time, most of them
-            must be set to registering the <structname>drm_driver</structname>
-            instance.
-          </para>
-          <synopsis>u32 driver_features;</synopsis>
-          <variablelist>
-            <title>Driver Feature Flags</title>
-            <varlistentry>
-              <term>DRIVER_USE_AGP</term>
-              <listitem><para>
-                Driver uses AGP interface, the DRM core will manage AGP resources.
-              </para></listitem>
-            </varlistentry>
-            <varlistentry>
-              <term>DRIVER_REQUIRE_AGP</term>
-              <listitem><para>
-                Driver needs AGP interface to function. AGP initialization failure
-                will become a fatal error.
-              </para></listitem>
-            </varlistentry>
-            <varlistentry>
-              <term>DRIVER_PCI_DMA</term>
-              <listitem><para>
-                Driver is capable of PCI DMA, mapping of PCI DMA buffers to
-                userspace will be enabled. Deprecated.
-              </para></listitem>
-            </varlistentry>
-            <varlistentry>
-              <term>DRIVER_SG</term>
-              <listitem><para>
-                Driver can perform scatter/gather DMA, allocation and mapping of
-                scatter/gather buffers will be enabled. Deprecated.
-              </para></listitem>
-            </varlistentry>
-            <varlistentry>
-              <term>DRIVER_HAVE_DMA</term>
-              <listitem><para>
-                Driver supports DMA, the userspace DMA API will be supported.
-                Deprecated.
-              </para></listitem>
-            </varlistentry>
-            <varlistentry>
-              <term>DRIVER_HAVE_IRQ</term><term>DRIVER_IRQ_SHARED</term>
-              <listitem><para>
-                DRIVER_HAVE_IRQ indicates whether the driver has an IRQ handler
-                managed by the DRM Core. The core will support simple IRQ handler
-                installation when the flag is set. The installation process is
-                described in <xref linkend="drm-irq-registration"/>.</para>
-                <para>DRIVER_IRQ_SHARED indicates whether the device &amp; handler
-                support shared IRQs (note that this is required of PCI drivers).
-              </para></listitem>
-            </varlistentry>
-            <varlistentry>
-              <term>DRIVER_GEM</term>
-              <listitem><para>
-                Driver use the GEM memory manager.
-              </para></listitem>
-            </varlistentry>
-            <varlistentry>
-              <term>DRIVER_MODESET</term>
-              <listitem><para>
-                Driver supports mode setting interfaces (KMS).
-              </para></listitem>
-            </varlistentry>
-            <varlistentry>
-              <term>DRIVER_PRIME</term>
-              <listitem><para>
-                Driver implements DRM PRIME buffer sharing.
-              </para></listitem>
-            </varlistentry>
-            <varlistentry>
-              <term>DRIVER_RENDER</term>
-              <listitem><para>
-                Driver supports dedicated render nodes.
-              </para></listitem>
-            </varlistentry>
-            <varlistentry>
-              <term>DRIVER_ATOMIC</term>
-              <listitem><para>
-                Driver supports atomic properties. In this case the driver
-                must implement appropriate obj->atomic_get_property() vfuncs
-                for any modeset objects with driver specific properties.
-              </para></listitem>
-            </varlistentry>
-          </variablelist>
-        </sect3>
-        <sect3>
-          <title>Major, Minor and Patchlevel</title>
-          <synopsis>int major;
-int minor;
-int patchlevel;</synopsis>
-          <para>
-            The DRM core identifies driver versions by a major, minor and patch
-            level triplet. The information is printed to the kernel log at
-            initialization time and passed to userspace through the
-            DRM_IOCTL_VERSION ioctl.
-          </para>
-          <para>
-            The major and minor numbers are also used to verify the requested driver
-            API version passed to DRM_IOCTL_SET_VERSION. When the driver API changes
-            between minor versions, applications can call DRM_IOCTL_SET_VERSION to
-            select a specific version of the API. If the requested major isn't equal
-            to the driver major, or the requested minor is larger than the driver
-            minor, the DRM_IOCTL_SET_VERSION call will return an error. Otherwise
-            the driver's set_version() method will be called with the requested
-            version.
-          </para>
-        </sect3>
-        <sect3>
-          <title>Name, Description and Date</title>
-          <synopsis>char *name;
-char *desc;
-char *date;</synopsis>
-          <para>
-            The driver name is printed to the kernel log at initialization time,
-            used for IRQ registration and passed to userspace through
-            DRM_IOCTL_VERSION.
-          </para>
-          <para>
-            The driver description is a purely informative string passed to
-            userspace through the DRM_IOCTL_VERSION ioctl and otherwise unused by
-            the kernel.
-          </para>
-          <para>
-            The driver date, formatted as YYYYMMDD, is meant to identify the date of
-            the latest modification to the driver. However, as most drivers fail to
-            update it, its value is mostly useless. The DRM core prints it to the
-            kernel log at initialization time and passes it to userspace through the
-            DRM_IOCTL_VERSION ioctl.
-          </para>
-        </sect3>
-      </sect2>
353 <title>Device Instance and Driver Handling</title>
354!Pdrivers/gpu/drm/drm_drv.c driver instance overview
355!Edrivers/gpu/drm/drm_drv.c
356 </sect2>
357 <sect2>
358 <title>Driver Load</title>
359 <sect3 id="drm-irq-registration">
360 <title>IRQ Registration</title>
361 <para>
362 The DRM core tries to facilitate IRQ handler registration and
363 unregistration by providing <function>drm_irq_install</function> and
364 <function>drm_irq_uninstall</function> functions. Those functions only
365 support a single interrupt per device, devices that use more than one
366 IRQs need to be handled manually.
367 </para>
368 <sect4>
369 <title>Managed IRQ Registration</title>
370 <para>
371 <function>drm_irq_install</function> starts by calling the
372 <methodname>irq_preinstall</methodname> driver operation. The operation
373 is optional and must make sure that the interrupt will not get fired by
374 clearing all pending interrupt flags or disabling the interrupt.
375 </para>
376 <para>
377 The passed-in IRQ will then be requested by a call to
378 <function>request_irq</function>. If the DRIVER_IRQ_SHARED driver
379 feature flag is set, a shared (IRQF_SHARED) IRQ handler will be
380 requested.
381 </para>
382 <para>
383 The IRQ handler function must be provided as the mandatory irq_handler
384 driver operation. It will get passed directly to
385 <function>request_irq</function> and thus has the same prototype as all
386 IRQ handlers. It will get called with a pointer to the DRM device as the
387 second argument.
388 </para>
389 <para>
390 Finally the function calls the optional
391 <methodname>irq_postinstall</methodname> driver operation. The operation
392 usually enables interrupts (excluding the vblank interrupt, which is
393 enabled separately), but drivers may choose to enable/disable interrupts
394 at a different time.
395 </para>
396 <para>
397 <function>drm_irq_uninstall</function> is similarly used to uninstall an
398 IRQ handler. It starts by waking up all processes waiting on a vblank
399 interrupt to make sure they don't hang, and then calls the optional
400 <methodname>irq_uninstall</methodname> driver operation. The operation
401 must disable all hardware interrupts. Finally the function frees the IRQ
402 by calling <function>free_irq</function>.
403 </para>
404 </sect4>
405 <sect4>
406 <title>Manual IRQ Registration</title>
407 <para>
408 Drivers that require multiple interrupt handlers can't use the managed
409 IRQ registration functions. In that case IRQs must be registered and
410 unregistered manually (usually with the <function>request_irq</function>
411 and <function>free_irq</function> functions, or their devm_* equivalent).
412 </para>
413 <para>
414 When manually registering IRQs, drivers must not set the DRIVER_HAVE_IRQ
415 driver feature flag, and must not provide the
416 <methodname>irq_handler</methodname> driver operation. They must set the
417 <structname>drm_device</structname> <structfield>irq_enabled</structfield>
418 field to 1 upon registration of the IRQs, and clear it to 0 after
419 unregistering the IRQs.
420 </para>
421 </sect4>
422 </sect3>
423 <sect3>
424 <title>Memory Manager Initialization</title>
425 <para>
426 Every DRM driver requires a memory manager which must be initialized at
427 load time. DRM currently contains two memory managers, the Translation
428 Table Manager (TTM) and the Graphics Execution Manager (GEM).
429 This document describes the use of the GEM memory manager only. See
430 <xref linkend="drm-memory-management"/> for details.
431 </para>
432 </sect3>
433 <sect3>
434 <title>Miscellaneous Device Configuration</title>
435 <para>
436 Another task that may be necessary for PCI devices during configuration
437 is mapping the video BIOS. On many devices, the VBIOS describes device
438 configuration, LCD panel timings (if any), and contains flags indicating
439 device state. Mapping the BIOS can be done using the pci_map_rom() call,
440 a convenience function that takes care of mapping the actual ROM,
441 whether it has been shadowed into memory (typically at address 0xc0000)
442 or exists on the PCI device in the ROM BAR. Note that after the ROM has
443 been mapped and any necessary information has been extracted, it should
444 be unmapped; on many devices, the ROM address decoder is shared with
445 other BARs, so leaving it mapped could cause undesired behaviour like
446 hangs or memory corruption.
447 <!--!Fdrivers/pci/rom.c pci_map_rom-->
448 </para>
449 </sect3>
450 </sect2>
451 <sect2>
452 <title>Bus-specific Device Registration and PCI Support</title>
453 <para>
454 A number of functions are provided to help with device registration.
455 The functions deal with PCI and platform devices respectively and are
456 only provided for historical reasons. These are all deprecated and
457 shouldn't be used in new drivers. Besides that there's a few
458 helpers for pci drivers.
459 </para>
460!Edrivers/gpu/drm/drm_pci.c
461!Edrivers/gpu/drm/drm_platform.c
462 </sect2>
463 </sect1>
464
465 <!-- Internals: memory management -->
466
467 <sect1 id="drm-memory-management">
468 <title>Memory management</title>
469 <para>
470 Modern Linux systems require large amount of graphics memory to store
471 frame buffers, textures, vertices and other graphics-related data. Given
472 the very dynamic nature of many of that data, managing graphics memory
473 efficiently is thus crucial for the graphics stack and plays a central
474 role in the DRM infrastructure.
475 </para>
476 <para>
477 The DRM core includes two memory managers, namely Translation Table Maps
478 (TTM) and Graphics Execution Manager (GEM). TTM was the first DRM memory
479 manager to be developed and tried to be a one-size-fits-them all
480 solution. It provides a single userspace API to accommodate the need of
481 all hardware, supporting both Unified Memory Architecture (UMA) devices
482 and devices with dedicated video RAM (i.e. most discrete video cards).
483 This resulted in a large, complex piece of code that turned out to be
484 hard to use for driver development.
485 </para>
486 <para>
487 GEM started as an Intel-sponsored project in reaction to TTM's
488 complexity. Its design philosophy is completely different: instead of
489 providing a solution to every graphics memory-related problems, GEM
490 identified common code between drivers and created a support library to
491 share it. GEM has simpler initialization and execution requirements than
492 TTM, but has no video RAM management capabilities and is thus limited to
493 UMA devices.
494 </para>
495 <sect2>
496 <title>The Translation Table Manager (TTM)</title>
497 <para>
498 TTM design background and information belongs here.
499 </para>
500 <sect3>
501 <title>TTM initialization</title>
502 <warning><para>This section is outdated.</para></warning>
503 <para>
504 Drivers wishing to support TTM must fill out a drm_bo_driver
505 structure. The structure contains several fields with function
506 pointers for initializing the TTM, allocating and freeing memory,
507 waiting for command completion and fence synchronization, and memory
508 migration. See the radeon_ttm.c file for an example of usage.
509 </para>
510 <para>
511 The ttm_global_reference structure is made up of several fields:
512 </para>
513 <programlisting>
514 struct ttm_global_reference {
515 enum ttm_global_types global_type;
516 size_t size;
517 void *object;
518 int (*init) (struct ttm_global_reference *);
519 void (*release) (struct ttm_global_reference *);
520 };
521 </programlisting>
522 <para>
523 There should be one global reference structure for your memory
524 manager as a whole, and there will be others for each object
525 created by the memory manager at runtime. Your global TTM should
526 have a type of TTM_GLOBAL_TTM_MEM. The size field for the global
527 object should be sizeof(struct ttm_mem_global), and the init and
528 release hooks should point at your driver-specific init and
529 release routines, which probably eventually call
530 ttm_mem_global_init and ttm_mem_global_release, respectively.
531 </para>
532 <para>
533 Once your global TTM accounting structure is set up and initialized
534 by calling ttm_global_item_ref() on it,
535 you need to create a buffer object TTM to
536 provide a pool for buffer object allocation by clients and the
537 kernel itself. The type of this object should be TTM_GLOBAL_TTM_BO,
538 and its size should be sizeof(struct ttm_bo_global). Again,
539 driver-specific init and release functions may be provided,
540 likely eventually calling ttm_bo_global_init() and
541 ttm_bo_global_release(), respectively. Also, like the previous
542 object, ttm_global_item_ref() is used to create an initial reference
543 count for the TTM, which will call your initialization function.
544 </para>
545 </sect3>
546 </sect2>
547 <sect2 id="drm-gem">
548 <title>The Graphics Execution Manager (GEM)</title>
549 <para>
550 The GEM design approach has resulted in a memory manager that doesn't
551 provide full coverage of all (or even all common) use cases in its
552 userspace or kernel API. GEM exposes a set of standard memory-related
553 operations to userspace and a set of helper functions to drivers, and let
554 drivers implement hardware-specific operations with their own private API.
555 </para>
556 <para>
557 The GEM userspace API is described in the
558 <ulink url="http://lwn.net/Articles/283798/"><citetitle>GEM - the Graphics
559 Execution Manager</citetitle></ulink> article on LWN. While slightly
560 outdated, the document provides a good overview of the GEM API principles.
561 Buffer allocation and read and write operations, described as part of the
562 common GEM API, are currently implemented using driver-specific ioctls.
563 </para>
564 <para>
565 GEM is data-agnostic. It manages abstract buffer objects without knowing
566 what individual buffers contain. APIs that require knowledge of buffer
567 contents or purpose, such as buffer allocation or synchronization
568 primitives, are thus outside of the scope of GEM and must be implemented
569 using driver-specific ioctls.
570 </para>
571 <para>
572 On a fundamental level, GEM involves several operations:
573 <itemizedlist>
574 <listitem>Memory allocation and freeing</listitem>
575 <listitem>Command execution</listitem>
576 <listitem>Aperture management at command execution time</listitem>
577 </itemizedlist>
578 Buffer object allocation is relatively straightforward and largely
579 provided by Linux's shmem layer, which provides memory to back each
580 object.
581 </para>
582 <para>
583 Device-specific operations, such as command execution, pinning, buffer
584 read &amp; write, mapping, and domain ownership transfers are left to
585 driver-specific ioctls.
586 </para>
587 <sect3>
588 <title>GEM Initialization</title>
589 <para>
590 Drivers that use GEM must set the DRIVER_GEM bit in the struct
591 <structname>drm_driver</structname>
592 <structfield>driver_features</structfield> field. The DRM core will
593 then automatically initialize the GEM core before calling the
594 <methodname>load</methodname> operation. Behind the scene, this will
595 create a DRM Memory Manager object which provides an address space
596 pool for object allocation.
597 </para>
598 <para>
599 In a KMS configuration, drivers need to allocate and initialize a
600 command ring buffer following core GEM initialization if required by
601 the hardware. UMA devices usually have what is called a "stolen"
602 memory region, which provides space for the initial framebuffer and
603 large, contiguous memory regions required by the device. This space is
604 typically not managed by GEM, and must be initialized separately into
605 its own DRM MM object.
606 </para>
607 </sect3>
608 <sect3>
609 <title>GEM Objects Creation</title>
610 <para>
611 GEM splits creation of GEM objects and allocation of the memory that
612 backs them in two distinct operations.
613 </para>
614 <para>
615 GEM objects are represented by an instance of struct
616 <structname>drm_gem_object</structname>. Drivers usually need to extend
617 GEM objects with private information and thus create a driver-specific
618 GEM object structure type that embeds an instance of struct
619 <structname>drm_gem_object</structname>.
620 </para>
621 <para>
622 To create a GEM object, a driver allocates memory for an instance of its
623 specific GEM object type and initializes the embedded struct
624 <structname>drm_gem_object</structname> with a call to
625 <function>drm_gem_object_init</function>. The function takes a pointer to
626 the DRM device, a pointer to the GEM object and the buffer object size
627 in bytes.
628 </para>
629 <para>
630 GEM uses shmem to allocate anonymous pageable memory.
631 <function>drm_gem_object_init</function> will create an shmfs file of
632 the requested size and store it into the struct
633 <structname>drm_gem_object</structname> <structfield>filp</structfield>
634 field. The memory is used as either main storage for the object when the
635 graphics hardware uses system memory directly or as a backing store
636 otherwise.
637 </para>
638 <para>
639 Drivers are responsible for the actual physical pages allocation by
640 calling <function>shmem_read_mapping_page_gfp</function> for each page.
641 Note that they can decide to allocate pages when initializing the GEM
642 object, or to delay allocation until the memory is needed (for instance
643 when a page fault occurs as a result of a userspace memory access or
644 when the driver needs to start a DMA transfer involving the memory).
645 </para>
646 <para>
647 Anonymous pageable memory allocation is not always desired, for instance
648 when the hardware requires physically contiguous system memory as is
649 often the case in embedded devices. Drivers can create GEM objects with
650 no shmfs backing (called private GEM objects) by initializing them with
651 a call to <function>drm_gem_private_object_init</function> instead of
652 <function>drm_gem_object_init</function>. Storage for private GEM
653 objects must be managed by drivers.
654 </para>
655 </sect3>
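
The creation pattern described in the deleted section above, sketched with a
hypothetical my_gem_object wrapper (shmfs-backed; a private object would use
drm_gem_private_object_init() instead):

    struct my_gem_object {
            struct drm_gem_object base;
            /* driver-private state, e.g. page array or DMA address */
    };

    static struct my_gem_object *my_gem_create(struct drm_device *dev,
                                               size_t size)
    {
            struct my_gem_object *obj;

            obj = kzalloc(sizeof(*obj), GFP_KERNEL);
            if (!obj)
                    return ERR_PTR(-ENOMEM);

            /* Sets up the shmfs file backing the object. */
            if (drm_gem_object_init(dev, &obj->base, size)) {
                    kfree(obj);
                    return ERR_PTR(-ENOMEM);
            }
            return obj;
    }
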
-        <sect3>
-          <title>GEM Objects Lifetime</title>
-          <para>
-            All GEM objects are reference-counted by the GEM core. References can be
-            acquired and release by <function>calling drm_gem_object_reference</function>
-            and <function>drm_gem_object_unreference</function> respectively. The
-            caller must hold the <structname>drm_device</structname>
-            <structfield>struct_mutex</structfield> lock when calling
-            <function>drm_gem_object_reference</function>. As a convenience, GEM
-            provides <function>drm_gem_object_unreference_unlocked</function>
-            functions that can be called without holding the lock.
-          </para>
-          <para>
-            When the last reference to a GEM object is released the GEM core calls
-            the <structname>drm_driver</structname>
-            <methodname>gem_free_object</methodname> operation. That operation is
-            mandatory for GEM-enabled drivers and must free the GEM object and all
-            associated resources.
-          </para>
-          <para>
-            <synopsis>void (*gem_free_object) (struct drm_gem_object *obj);</synopsis>
-            Drivers are responsible for freeing all GEM object resources. This includes
-            the resources created by the GEM core, which need to be released with
-            <function>drm_gem_object_release</function>.
-          </para>
-        </sect3>
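
A matching gem_free_object implementation for the my_gem_object wrapper sketched
earlier might look like the following (again purely illustrative):

    static void my_gem_free_object(struct drm_gem_object *gem)
    {
            struct my_gem_object *obj =
                    container_of(gem, struct my_gem_object, base);

            /* Free driver-private resources (pages, mappings, ...) first. */

            /* Releases the shmfs backing set up by drm_gem_object_init(). */
            drm_gem_object_release(gem);
            kfree(obj);
    }
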
-        <sect3>
-          <title>GEM Objects Naming</title>
-          <para>
-            Communication between userspace and the kernel refers to GEM objects
-            using local handles, global names or, more recently, file descriptors.
-            All of those are 32-bit integer values; the usual Linux kernel limits
-            apply to the file descriptors.
-          </para>
-          <para>
-            GEM handles are local to a DRM file. Applications get a handle to a GEM
-            object through a driver-specific ioctl, and can use that handle to refer
-            to the GEM object in other standard or driver-specific ioctls. Closing a
-            DRM file handle frees all its GEM handles and dereferences the
-            associated GEM objects.
-          </para>
-          <para>
-            To create a handle for a GEM object drivers call
-            <function>drm_gem_handle_create</function>. The function takes a pointer
-            to the DRM file and the GEM object and returns a locally unique handle.
-            When the handle is no longer needed drivers delete it with a call to
-            <function>drm_gem_handle_delete</function>. Finally the GEM object
-            associated with a handle can be retrieved by a call to
-            <function>drm_gem_object_lookup</function>.
-          </para>
-          <para>
-            Handles don't take ownership of GEM objects, they only take a reference
-            to the object that will be dropped when the handle is destroyed. To
-            avoid leaking GEM objects, drivers must make sure they drop the
-            reference(s) they own (such as the initial reference taken at object
-            creation time) as appropriate, without any special consideration for the
-            handle. For example, in the particular case of combined GEM object and
-            handle creation in the implementation of the
-            <methodname>dumb_create</methodname> operation, drivers must drop the
-            initial reference to the GEM object before returning the handle.
-          </para>
-          <para>
-            GEM names are similar in purpose to handles but are not local to DRM
-            files. They can be passed between processes to reference a GEM object
-            globally. Names can't be used directly to refer to objects in the DRM
-            API, applications must convert handles to names and names to handles
-            using the DRM_IOCTL_GEM_FLINK and DRM_IOCTL_GEM_OPEN ioctls
-            respectively. The conversion is handled by the DRM core without any
-            driver-specific support.
-          </para>
-          <para>
-            GEM also supports buffer sharing with dma-buf file descriptors through
-            PRIME. GEM-based drivers must use the provided helpers functions to
-            implement the exporting and importing correctly. See <xref linkend="drm-prime-support" />.
-            Since sharing file descriptors is inherently more secure than the
-            easily guessable and global GEM names it is the preferred buffer
-            sharing mechanism. Sharing buffers through GEM names is only supported
-            for legacy userspace. Furthermore PRIME also allows cross-device
-            buffer sharing since it is based on dma-bufs.
-          </para>
-        </sect3>
737 <sect3 id="drm-gem-objects-mapping">
738 <title>GEM Objects Mapping</title>
739 <para>
740 Because mapping operations are fairly heavyweight, GEM favours
741 read/write-like access to buffers, implemented through driver-specific
742 ioctls, over mapping buffers to userspace. However, when random access
743 to the buffer is needed (to perform software rendering for instance),
744 direct access to the object can be more efficient.
745 </para>
746 <para>
747 The mmap system call can't be used directly to map GEM objects, as they
748 don't have their own file handle. Two alternative methods currently
749 co-exist to map GEM objects to userspace. The first method uses a
750 driver-specific ioctl to perform the mapping operation, calling
751 <function>do_mmap</function> under the hood. This approach is considered
752 dubious, is discouraged for new GEM-enabled drivers, and will
753 thus not be described here.
754 </para>
755 <para>
756 The second method uses the mmap system call on the DRM file handle.
757 <synopsis>void *mmap(void *addr, size_t length, int prot, int flags, int fd,
758 off_t offset);</synopsis>
759 DRM identifies the GEM object to be mapped by a fake offset passed
760 through the mmap offset argument. Prior to being mapped, a GEM object
761 must thus be associated with a fake offset. To do so, drivers must call
762 <function>drm_gem_create_mmap_offset</function> on the object.
763 </para>
764 <para>
765 Once allocated, the fake offset value
766 must be passed to the application in a driver-specific way and can then
767 be used as the mmap offset argument.
768 </para>
769 <para>
770 The GEM core provides a helper method <function>drm_gem_mmap</function>
771 to handle object mapping. The method can be set directly as the mmap
772 file operation handler. It will look up the GEM object based on the
773 offset value and set the VMA operations to the
774 <structname>drm_driver</structname> <structfield>gem_vm_ops</structfield>
775 field. Note that <function>drm_gem_mmap</function> doesn't map memory to
776 userspace, but relies on the driver-provided fault handler to map pages
777 individually.
778 </para>
779 <para>
780 To use <function>drm_gem_mmap</function>, drivers must fill the struct
781 <structname>drm_driver</structname> <structfield>gem_vm_ops</structfield>
782 field with a pointer to VM operations.
783 </para>
784 <para>
785 <synopsis>struct vm_operations_struct *gem_vm_ops
786
787 struct vm_operations_struct {
788 void (*open)(struct vm_area_struct * area);
789 void (*close)(struct vm_area_struct * area);
790 int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
791 };</synopsis>
792 </para>
793 <para>
794 The <methodname>open</methodname> and <methodname>close</methodname>
795 operations must update the GEM object reference count. Drivers can use
796 the <function>drm_gem_vm_open</function> and
797 <function>drm_gem_vm_close</function> helper functions directly as open
798 and close handlers.
799 </para>
800 <para>
801 The fault operation handler is responsible for mapping individual pages
802 to userspace when a page fault occurs. Depending on the memory
803 allocation scheme, drivers can allocate pages at fault time, or can
804 decide to allocate memory for the GEM object at the time the object is
805 created.
806 </para>
807 <para>
808 Drivers that want to map the GEM object upfront instead of handling page
809 faults can implement their own mmap file operation handler.
810 </para>
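      <para>
        A minimal sketch of the resulting VM operations, assuming the
        driver keeps its backing pages in a hypothetical
        <structname>foo_gem_object</structname> container and has them
        allocated by fault time:
      </para>
      <programlisting><![CDATA[
static int foo_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	/* to_foo_gem_object() and foo->pages are assumed driver constructs. */
	struct foo_gem_object *foo = to_foo_gem_object(obj);
	unsigned long address = (unsigned long)vmf->virtual_address;
	pgoff_t pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	int ret;

	/* Map the single faulting page into the VMA. */
	ret = vm_insert_page(vma, address, foo->pages[pgoff]);
	switch (ret) {
	case 0:
	case -EBUSY:	/* raced with another fault, the page is mapped */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

static const struct vm_operations_struct foo_gem_vm_ops = {
	.open = drm_gem_vm_open,	/* takes a GEM object reference */
	.close = drm_gem_vm_close,	/* drops it again */
	.fault = foo_gem_fault,
};]]></programlisting>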
811 </sect3>
812 <sect3>
813 <title>Memory Coherency</title>
814 <para>
815 When mapped to the device or used in a command buffer, backing pages
816 for an object are flushed to memory and marked write combined so as to
817 be coherent with the GPU. Likewise, if the CPU accesses an object
818 after the GPU has finished rendering to the object, then the object
819 must be made coherent with the CPU's view of memory, usually involving
820 GPU cache flushing of various kinds. This core CPU&lt;-&gt;GPU
821 coherency management is provided by a device-specific ioctl, which
822 evaluates an object's current domain and performs any necessary
823 flushing or synchronization to put the object into the desired
824 coherency domain (note that the object may be busy, i.e. an active
825 render target; in that case, setting the domain blocks the client and
826 waits for rendering to complete before performing any necessary
827 flushing operations).
828 </para>
829 </sect3>
830 <sect3>
831 <title>Command Execution</title>
832 <para>
833 Perhaps the most important GEM function for GPU devices is providing a
834 command execution interface to clients. Client programs construct
835 command buffers containing references to previously allocated memory
836 objects, and then submit them to GEM. At that point, GEM takes care to
837 bind all the objects into the GTT, execute the buffer, and provide
838 necessary synchronization between clients accessing the same buffers.
839 This often involves evicting some objects from the GTT and re-binding
840 others (a fairly expensive operation), and providing relocation
841 support which hides fixed GTT offsets from clients. Clients must take
842 care not to submit command buffers that reference more objects than
843 can fit in the GTT; otherwise, GEM will reject them and no rendering
844 will occur. Similarly, if several objects in the buffer require fence
845 registers to be allocated for correct rendering (e.g. 2D blits on
846 pre-965 chips), care must be taken not to require more fence registers
847 than are available to the client. Such resource management should be
848 abstracted from the client in libdrm.
849 </para>
850 </sect3>
851 </sect2>
852 <sect2>
853 <title>GEM Function Reference</title>
854!Edrivers/gpu/drm/drm_gem.c
855!Iinclude/drm/drm_gem.h
856 </sect2>
857 <sect2>
858 <title>VMA Offset Manager</title>
859!Pdrivers/gpu/drm/drm_vma_manager.c vma offset manager
860!Edrivers/gpu/drm/drm_vma_manager.c
861!Iinclude/drm/drm_vma_manager.h
862 </sect2>
863 <sect2 id="drm-prime-support">
864 <title>PRIME Buffer Sharing</title>
865 <para>
866 PRIME is the cross-device buffer sharing framework in DRM, originally
867 created for the Optimus range of multi-GPU platforms. To userspace,
868 PRIME buffers are dma-buf based file descriptors.
869 </para>
870 <sect3>
871 <title>Overview and Driver Interface</title>
872 <para>
873 Similar to GEM global names, PRIME file descriptors are
874 also used to share buffer objects across processes. They offer
875 additional security: as file descriptors must be explicitly sent over
876 UNIX domain sockets to be shared between applications, they can't be
877 guessed like the globally unique GEM names.
878 </para>
879 <para>
880 Drivers that support the PRIME
881 API must set the DRIVER_PRIME bit in the struct
882 <structname>drm_driver</structname>
883 <structfield>driver_features</structfield> field, and implement the
884 <methodname>prime_handle_to_fd</methodname> and
885 <methodname>prime_fd_to_handle</methodname> operations.
886 </para>
887 <para>
888 <synopsis>int (*prime_handle_to_fd)(struct drm_device *dev,
889 struct drm_file *file_priv, uint32_t handle,
890 uint32_t flags, int *prime_fd);
891int (*prime_fd_to_handle)(struct drm_device *dev,
892 struct drm_file *file_priv, int prime_fd,
893 uint32_t *handle);</synopsis>
894 Those two operations convert a handle to a PRIME file descriptor and
895 vice versa. Drivers must use the kernel dma-buf buffer sharing framework
896 to manage the PRIME file descriptors. Similar to the mode setting
897 API, PRIME is agnostic to the underlying buffer object manager, as
898 long as handles are 32-bit unsigned integers.
899 </para>
900 <para>
901 While non-GEM drivers must implement the operations themselves, GEM
902 drivers must use the <function>drm_gem_prime_handle_to_fd</function>
903 and <function>drm_gem_prime_fd_to_handle</function> helper functions.
904 Those helpers rely on the driver
905 <methodname>gem_prime_export</methodname> and
906 <methodname>gem_prime_import</methodname> operations to create a dma-buf
907 instance from a GEM object (dma-buf exporter role) and to create a GEM
908 object from a dma-buf instance (dma-buf importer role).
909 </para>
910 <para>
911 <synopsis>struct dma_buf * (*gem_prime_export)(struct drm_device *dev,
912 struct drm_gem_object *obj,
913 int flags);
914struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
915 struct dma_buf *dma_buf);</synopsis>
916 These two operations are mandatory for GEM drivers that support
917 PRIME.
918 </para>
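      <para>
        A GEM driver that is content with the default behaviour can simply
        plug in the core helpers; a sketch, with the hypothetical
        <structname>foo_driver</structname> eliding all unrelated fields:
      </para>
      <programlisting><![CDATA[
static struct drm_driver foo_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
	/* Core helpers implementing the two PRIME operations... */
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	/* ...backed by the default GEM export and import implementations. */
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_import = drm_gem_prime_import,
	/* ... */
};]]></programlisting>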
919 </sect3>
920 <sect3>
921 <title>PRIME Helper Functions</title>
922!Pdrivers/gpu/drm/drm_prime.c PRIME Helpers
923 </sect3>
924 </sect2>
925 <sect2>
926 <title>PRIME Function References</title>
927!Edrivers/gpu/drm/drm_prime.c
928 </sect2>
929 <sect2>
930 <title>DRM MM Range Allocator</title>
931 <sect3>
932 <title>Overview</title>
933!Pdrivers/gpu/drm/drm_mm.c Overview
934 </sect3>
935 <sect3>
936 <title>LRU Scan/Eviction Support</title>
937!Pdrivers/gpu/drm/drm_mm.c lru scan roaster
938 </sect3>
939 </sect2>
940 <sect2>
941 <title>DRM MM Range Allocator Function References</title>
942!Edrivers/gpu/drm/drm_mm.c
943!Iinclude/drm/drm_mm.h
944 </sect2>
945 <sect2>
946 <title>CMA Helper Functions Reference</title>
947!Pdrivers/gpu/drm/drm_gem_cma_helper.c cma helpers
948!Edrivers/gpu/drm/drm_gem_cma_helper.c
949!Iinclude/drm/drm_gem_cma_helper.h
950 </sect2>
951 </sect1>
952
953 <!-- Internals: mode setting -->
954
955 <sect1 id="drm-mode-setting">
956 <title>Mode Setting</title>
957 <para>
958 Drivers must initialize the mode setting core by calling
959 <function>drm_mode_config_init</function> on the DRM device. The function
960 initializes the <structname>drm_device</structname>
961 <structfield>mode_config</structfield> field and never fails. Once done,
962 mode configuration must be set up by initializing the following fields, as sketched after the list.
963 </para>
964 <itemizedlist>
965 <listitem>
966 <synopsis>int min_width, min_height;
967int max_width, max_height;</synopsis>
968 <para>
969 Minimum and maximum width and height of the frame buffers in pixel
970 units.
971 </para>
972 </listitem>
973 <listitem>
974 <synopsis>struct drm_mode_config_funcs *funcs;</synopsis>
975 <para>Mode setting functions.</para>
976 </listitem>
977 </itemizedlist>
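    <para>
      A sketch of the initialization sequence, with hypothetical limits
      that would in practice come from the hardware documentation (the
      <function>drm_fb_cma_create</function> helper is used here purely as
      an example <methodname>fb_create</methodname> implementation):
    </para>
    <programlisting><![CDATA[
static const struct drm_mode_config_funcs foo_mode_config_funcs = {
	.fb_create = drm_fb_cma_create,
};

static void foo_modeset_init(struct drm_device *dev)
{
	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 4096;
	dev->mode_config.max_height = 4096;
	dev->mode_config.funcs = &foo_mode_config_funcs;
}]]></programlisting>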
978 <sect2>
979 <title>Display Modes Function Reference</title>
980!Iinclude/drm/drm_modes.h
981!Edrivers/gpu/drm/drm_modes.c
982 </sect2>
983 <sect2>
984 <title>Atomic Mode Setting Function Reference</title>
985!Edrivers/gpu/drm/drm_atomic.c
986!Idrivers/gpu/drm/drm_atomic.c
987 </sect2>
988 <sect2>
989 <title>Frame Buffer Abstraction</title>
990 <para>
991 Frame buffers are abstract memory objects that provide a source of
992 pixels to scanout to a CRTC. Applications explicitly request the
993 creation of frame buffers through the DRM_IOCTL_MODE_ADDFB(2) ioctls and
994 receive an opaque handle that can be passed to the KMS CRTC control,
995 plane configuration and page flip functions.
996 </para>
997 <para>
998 Frame buffers rely on the underlying memory manager for low-level memory
999 operations. When creating a frame buffer applications pass a memory
1000 handle (or a list of memory handles for multi-planar formats) through
1001 the <parameter>drm_mode_fb_cmd2</parameter> argument. For drivers using
1002 GEM as their userspace buffer management interface this would be a GEM
1003 handle. Drivers are however free to use their own backing storage object
1004 handles, e.g. vmwgfx directly exposes special TTM handles to userspace
1005 and so expects TTM handles in the create ioctl and not GEM handles.
1006 </para>
1007 <para>
1008 The lifetime of a DRM framebuffer is controlled by a reference count.
1009 Drivers can grab additional references with
1010 <function>drm_framebuffer_reference</function> and drop them
1011 again with <function>drm_framebuffer_unreference</function>. For
1012 driver-private framebuffers for which the last reference is never
1013 dropped (e.g. for the fbdev framebuffer when the struct
1014 <structname>drm_framebuffer</structname> is embedded into the fbdev
1015 helper struct) drivers can manually clean up a framebuffer at module
1016 unload time with
1017 <function>drm_framebuffer_unregister_private</function>.
1018 </para>
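      <para>
        A typical use of the reference count, sketched for an asynchronous
        page flip:
      </para>
      <programlisting><![CDATA[
/* Keep the framebuffer alive while the flip is pending... */
drm_framebuffer_reference(fb);

/* ... program the hardware ... */

/* ...and drop the reference again from the flip completion handler. */
drm_framebuffer_unreference(fb);]]></programlisting>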
1019 </sect2>
1020 <sect2>
1021 <title>Dumb Buffer Objects</title>
1022 <para>
1023 The KMS API doesn't standardize backing storage object creation and
1024 leaves it to driver-specific ioctls. Furthermore, actually creating a
1025 buffer object even for GEM-based drivers is done through a
1026 driver-specific ioctl - GEM only has a common userspace interface for
1027 sharing and destroying objects. While not an issue for full-fledged
1028 graphics stacks that include device-specific userspace components (in
1029 libdrm for instance), this limit makes DRM-based early boot graphics
1030 unnecessarily complex.
1031 </para>
1032 <para>
1033 Dumb objects partly alleviate the problem by providing a standard
1034 API to create dumb buffers suitable for scanout, which can then be used
1035 to create KMS frame buffers.
1036 </para>
1037 <para>
1038 To support dumb objects drivers must implement the
1039 <methodname>dumb_create</methodname>,
1040 <methodname>dumb_destroy</methodname> and
1041 <methodname>dumb_map_offset</methodname> operations.
1042 </para>
1043 <itemizedlist>
1044 <listitem>
1045 <synopsis>int (*dumb_create)(struct drm_file *file_priv, struct drm_device *dev,
1046 struct drm_mode_create_dumb *args);</synopsis>
1047 <para>
1048 The <methodname>dumb_create</methodname> operation creates a driver
1049 object (GEM or TTM handle) suitable for scanout based on the
1050 width, height and depth from the struct
1051 <structname>drm_mode_create_dumb</structname> argument. It fills the
1052 argument's <structfield>handle</structfield>,
1053 <structfield>pitch</structfield> and <structfield>size</structfield>
1054 fields with a handle for the newly created object and its line
1055 pitch and size in bytes.
1056 </para>
1057 </listitem>
1058 <listitem>
1059 <synopsis>int (*dumb_destroy)(struct drm_file *file_priv, struct drm_device *dev,
1060 uint32_t handle);</synopsis>
1061 <para>
1062 The <methodname>dumb_destroy</methodname> operation destroys a dumb
1063 object created by <methodname>dumb_create</methodname>.
1064 </para>
1065 </listitem>
1066 <listitem>
1067 <synopsis>int (*dumb_map_offset)(struct drm_file *file_priv, struct drm_device *dev,
1068 uint32_t handle, uint64_t *offset);</synopsis>
1069 <para>
1070 The <methodname>dumb_map_offset</methodname> operation associates a
1071 fake mmap offset with the object given by the handle and returns
1072 it. Drivers must use the
1073 <function>drm_gem_create_mmap_offset</function> function to
1074 associate the fake offset as described in
1075 <xref linkend="drm-gem-objects-mapping"/>.
1076 </para>
1077 </listitem>
1078 </itemizedlist>
1079 <para>
1080 Note that dumb objects must not be used for GPU acceleration, as has been
1081 attempted on some ARM embedded platforms. Such drivers really must have
1082 a hardware-specific ioctl to allocate suitable buffer objects.
1083 </para>
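      <para>
        Drivers built on the CMA GEM helpers can, for instance, wire the
        three operations straight to the stock implementations (a sketch;
        the hypothetical <structname>foo_driver</structname> elides all
        other fields):
      </para>
      <programlisting><![CDATA[
static struct drm_driver foo_driver = {
	/* ... */
	.dumb_create = drm_gem_cma_dumb_create,
	.dumb_map_offset = drm_gem_cma_dumb_map_offset,
	.dumb_destroy = drm_gem_dumb_destroy,
	/* ... */
};]]></programlisting>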
1084 </sect2>
1085 <sect2>
1086 <title>Output Polling</title>
1087 <synopsis>void (*output_poll_changed)(struct drm_device *dev);</synopsis>
1088 <para>
1089 This operation notifies the driver that the status of one or more
1090 connectors has changed. Drivers that use the fb helper can just call the
1091 <function>drm_fb_helper_hotplug_event</function> function to handle this
1092 operation.
1093 </para>
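      <para>
        A minimal sketch, assuming a hypothetical
        <structname>foo_device</structname> that embeds the fb helper:
      </para>
      <programlisting><![CDATA[
static void foo_output_poll_changed(struct drm_device *dev)
{
	struct foo_device *foo = dev->dev_private;

	drm_fb_helper_hotplug_event(&foo->fb_helper);
}

static const struct drm_mode_config_funcs foo_mode_config_funcs = {
	/* ... */
	.output_poll_changed = foo_output_poll_changed,
};]]></programlisting>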
1094 </sect2>
1095 <sect2>
1096 <title>Locking</title>
1097 <para>
1098 Besides some lookup structures with their own locking (which is hidden
1099 behind the interface functions) most of the modeset state is protected
1100 by the <code>dev-&gt;mode_config.lock</code> mutex and additionally by
1101 per-CRTC locks to allow cursor updates, pageflips and similar operations
1102 to occur concurrently with background tasks like output detection.
1103 Operations which cross domains, like a full modeset, always grab all
1104 locks. Drivers therefore need to protect resources shared between CRTCs
1105 with additional locking. They also need to be careful to always grab the
1106 relevant CRTC locks if a modeset function touches CRTC state, e.g. for
1107 load detection (which only grabs the <code>mode_config.lock</code>
1108 to allow concurrent screen updates on live CRTCs).
1109 </para>
1110 </sect2>
1111 </sect1>
1112
1113 <!-- Internals: kms initialization and cleanup -->
1114
1115 <sect1 id="drm-kms-init">
1116 <title>KMS Initialization and Cleanup</title>
1117 <para>
1118 A KMS device is abstracted and exposed as a set of planes, CRTCs, encoders
1119 and connectors. KMS drivers must thus create and initialize all those
1120 objects at load time after initializing mode setting.
1121 </para>
1122 <sect2>
1123 <title>CRTCs (struct <structname>drm_crtc</structname>)</title>
1124 <para>
1125 A CRTC is an abstraction representing a part of the chip that contains a
1126 pointer to a scanout buffer. Therefore, the number of CRTCs available
1127 determines how many independent scanout buffers can be active at any
1128 given time. The CRTC structure contains several fields to support this:
1129 a pointer to some video memory (abstracted as a frame buffer object), a
1130 display mode, and an (x, y) offset into the video memory to support
1131 panning or configurations where one piece of video memory spans multiple
1132 CRTCs.
1133 </para>
1134 <sect3>
1135 <title>CRTC Initialization</title>
1136 <para>
1137 A KMS device must create and register at least one struct
1138 <structname>drm_crtc</structname> instance. The instance is allocated
1139 and zeroed by the driver, possibly as part of a larger structure, and
1140 registered with a call to <function>drm_crtc_init</function> with a
1141 pointer to CRTC functions.
1142 </para>
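        <para>
          A sketch of the pattern, embedding the CRTC in a hypothetical
          driver-private structure
          (<structname>foo_crtc_funcs</structname> is an assumed
          driver-provided vtable):
        </para>
        <programlisting><![CDATA[
struct foo_crtc {
	struct drm_crtc base;
	/* driver-private CRTC state would live here */
};

static int foo_crtc_create(struct drm_device *dev)
{
	struct foo_crtc *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

	if (!foo)
		return -ENOMEM;

	return drm_crtc_init(dev, &foo->base, &foo_crtc_funcs);
}]]></programlisting>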
1143 </sect3>
1144 </sect2>
1145 <sect2>
1146 <title>Planes (struct <structname>drm_plane</structname>)</title>
1147 <para>
1148 A plane represents an image source that can be blended with or overlaid
1149 on top of a CRTC during the scanout process. Planes are associated with
1150 a frame buffer to crop a portion of the image memory (source) and
1151 optionally scale it to a destination size. The result is then blended
1152 with or overlaid on top of a CRTC.
1153 </para>
1154 <para>
1155 The DRM core recognizes three types of planes:
1156 <itemizedlist>
1157 <listitem>
1158 DRM_PLANE_TYPE_PRIMARY represents a "main" plane for a CRTC. Primary
1159 planes are the planes operated upon by CRTC modesetting and flipping
1160 operations described in the page_flip hook in <structname>drm_crtc_funcs</structname>.
1161 </listitem>
1162 <listitem>
1163 DRM_PLANE_TYPE_CURSOR represents a "cursor" plane for a CRTC. Cursor
1164 planes are the planes operated upon by the DRM_IOCTL_MODE_CURSOR and
1165 DRM_IOCTL_MODE_CURSOR2 ioctls.
1166 </listitem>
1167 <listitem>
1168 DRM_PLANE_TYPE_OVERLAY represents all non-primary, non-cursor planes.
1169 Some drivers refer to these types of planes as "sprites" internally.
1170 </listitem>
1171 </itemizedlist>
1172 For compatibility with legacy userspace, only overlay planes are made
1173 available to userspace by default. Userspace clients may set the
1174 DRM_CLIENT_CAP_UNIVERSAL_PLANES client capability bit to indicate that
1175 they wish to receive a universal plane list containing all plane types.
1176 </para>
1177 <sect3>
1178 <title>Plane Initialization</title>
1179 <para>
1180 To create a plane, a KMS driver allocates and
1181 zeroes an instance of struct <structname>drm_plane</structname>
1182 (possibly as part of a larger structure) and registers it with a call
1183 to <function>drm_universal_plane_init</function>. The function takes a bitmask
1184 of the CRTCs that can be associated with the plane, a pointer to the
1185 plane functions, a list of supported formats, and the type of
1186 plane (primary, cursor, or overlay) being initialized.
1187 </para>
1188 <para>
1189 Cursor and overlay planes are optional. All drivers should provide
1190 one primary plane per CRTC (although this requirement may change in
1191 the future); drivers that do not wish to provide special handling for
1192 primary planes may make use of the helper functions described in
1193 <xref linkend="drm-kms-planehelpers"/> to create and register a
1194 primary plane with standard capabilities.
1195 </para>
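        <para>
          A sketch of the registration call for a primary plane usable on
          the first CRTC only; <structname>foo_plane_funcs</structname> is
          an assumed driver vtable, and the trailing NULL is the optional
          debug name taken by recent kernels:
        </para>
        <programlisting><![CDATA[
static const uint32_t foo_plane_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
};

static int foo_primary_plane_init(struct drm_device *dev,
				  struct drm_plane *plane)
{
	return drm_universal_plane_init(dev, plane, BIT(0),
					&foo_plane_funcs, foo_plane_formats,
					ARRAY_SIZE(foo_plane_formats),
					DRM_PLANE_TYPE_PRIMARY, NULL);
}]]></programlisting>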
1196 </sect3>
1197 </sect2>
1198 <sect2>
1199 <title>Encoders (struct <structname>drm_encoder</structname>)</title>
1200 <para>
1201 An encoder takes pixel data from a CRTC and converts it to a format
1202 suitable for any attached connectors. On some devices, it may be
1203 possible to have a CRTC send data to more than one encoder. In that
1204 case, both encoders would receive data from the same scanout buffer,
1205 resulting in a "cloned" display configuration across the connectors
1206 attached to each encoder.
1207 </para>
1208 <sect3>
1209 <title>Encoder Initialization</title>
1210 <para>
1211 As for CRTCs, a KMS driver must create, initialize and register at
1212 least one struct <structname>drm_encoder</structname> instance. The
1213 instance is allocated and zeroed by the driver, possibly as part of a
1214 larger structure.
1215 </para>
1216 <para>
1217 Drivers must initialize the struct <structname>drm_encoder</structname>
1218 <structfield>possible_crtcs</structfield> and
1219 <structfield>possible_clones</structfield> fields before registering the
1220 encoder. Both fields are bitmasks of, respectively, the CRTCs that the
1221 encoder can be connected to and the sibling encoders that are candidates for cloning.
1222 </para>
1223 <para>
1224 After being initialized, the encoder must be registered with a call to
1225 <function>drm_encoder_init</function>. The function takes a pointer to
1226 the encoder functions and an encoder type. Supported types are
1227 <itemizedlist>
1228 <listitem>
1229 DRM_MODE_ENCODER_DAC for VGA and analog on DVI-I/DVI-A
1230 </listitem>
1231 <listitem>
1232 DRM_MODE_ENCODER_TMDS for DVI, HDMI and (embedded) DisplayPort
1233 </listitem>
1234 <listitem>
1235 DRM_MODE_ENCODER_LVDS for display panels
1236 </listitem>
1237 <listitem>
1238 DRM_MODE_ENCODER_TVDAC for TV output (Composite, S-Video, Component,
1239 SCART)
1240 </listitem>
1241 <listitem>
1242 DRM_MODE_ENCODER_VIRTUAL for virtual machine displays
1243 </listitem>
1244 </itemizedlist>
1245 </para>
1246 <para>
1247 Encoders must be attached to a CRTC to be used. DRM drivers leave
1248 encoders unattached at initialization time. Applications (or the fbdev
1249 compatibility layer when implemented) are responsible for attaching the
1250 encoders they want to use to a CRTC.
1251 </para>
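        <para>
          A sketch for a TMDS encoder that can be driven by the first two
          CRTCs and has no cloning siblings
          (<structname>foo_encoder_funcs</structname> is an assumed
          vtable; the trailing NULL is the optional debug name taken by
          recent kernels):
        </para>
        <programlisting><![CDATA[
static int foo_encoder_create(struct drm_device *dev,
			      struct drm_encoder *encoder)
{
	/* Fill the bitmasks before registering the encoder. */
	encoder->possible_crtcs = BIT(0) | BIT(1);
	encoder->possible_clones = 0;

	return drm_encoder_init(dev, encoder, &foo_encoder_funcs,
				DRM_MODE_ENCODER_TMDS, NULL);
}]]></programlisting>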
1252 </sect3>
1253 </sect2>
1254 <sect2>
1255 <title>Connectors (struct <structname>drm_connector</structname>)</title>
1256 <para>
1257 A connector is the final destination for pixel data on a device, and
1258 usually connects directly to an external display device like a monitor
1259 or laptop panel. A connector can only be attached to one encoder at a
1260 time. The connector is also the structure where information about the
1261 attached display is kept, so it contains fields for display data, EDID
1262 data, DPMS &amp; connection status, and information about modes
1263 supported on the attached displays.
1264 </para>
1265 <sect3>
1266 <title>Connector Initialization</title>
1267 <para>
1268 Finally a KMS driver must create, initialize, register and attach at
1269 least one struct <structname>drm_connector</structname> instance. The
1270 instance is created like other KMS objects and initialized by setting the
1271 following fields.
1272 </para>
1273 <variablelist>
1274 <varlistentry>
1275 <term><structfield>interlace_allowed</structfield></term>
1276 <listitem><para>
1277 Whether the connector can handle interlaced modes.
1278 </para></listitem>
1279 </varlistentry>
1280 <varlistentry>
1281 <term><structfield>doublescan_allowed</structfield></term>
1282 <listitem><para>
1283 Whether the connector can handle doublescan.
1284 </para></listitem>
1285 </varlistentry>
1286 <varlistentry>
1287 <term><structfield>display_info
1288 </structfield></term>
1289 <listitem><para>
1290 Display information is filled from EDID information when a display
1291 is detected. For non-hot-pluggable displays such as flat panels in
1292 embedded systems, the driver should initialize the
1293 <structfield>display_info</structfield>.<structfield>width_mm</structfield>
1294 and
1295 <structfield>display_info</structfield>.<structfield>height_mm</structfield>
1296 fields with the physical size of the display.
1297 </para></listitem>
1298 </varlistentry>
1299 <varlistentry>
1300 <term id="drm-kms-connector-polled"><structfield>polled</structfield></term>
1301 <listitem><para>
1302 Connector polling mode, a combination of
1303 <variablelist>
1304 <varlistentry>
1305 <term>DRM_CONNECTOR_POLL_HPD</term>
1306 <listitem><para>
1307 The connector generates hotplug events and doesn't need to be
1308 periodically polled. The CONNECT and DISCONNECT flags must not
1309 be set together with the HPD flag.
1310 </para></listitem>
1311 </varlistentry>
1312 <varlistentry>
1313 <term>DRM_CONNECTOR_POLL_CONNECT</term>
1314 <listitem><para>
1315 Periodically poll the connector for connection.
1316 </para></listitem>
1317 </varlistentry>
1318 <varlistentry>
1319 <term>DRM_CONNECTOR_POLL_DISCONNECT</term>
1320 <listitem><para>
1321 Periodically poll the connector for disconnection.
1322 </para></listitem>
1323 </varlistentry>
1324 </variablelist>
1325 Set to 0 for connectors that don't support connection status
1326 discovery.
1327 </para></listitem>
1328 </varlistentry>
1329 </variablelist>
1330 <para>
1331 The connector is then registered with a call to
1332 <function>drm_connector_init</function> with a pointer to the connector
1333 functions and a connector type, and exposed through sysfs with a call to
1334 <function>drm_connector_register</function>.
1335 </para>
1336 <para>
1337 Supported connector types are
1338 <itemizedlist>
1339 <listitem>DRM_MODE_CONNECTOR_VGA</listitem>
1340 <listitem>DRM_MODE_CONNECTOR_DVII</listitem>
1341 <listitem>DRM_MODE_CONNECTOR_DVID</listitem>
1342 <listitem>DRM_MODE_CONNECTOR_DVIA</listitem>
1343 <listitem>DRM_MODE_CONNECTOR_Composite</listitem>
1344 <listitem>DRM_MODE_CONNECTOR_SVIDEO</listitem>
1345 <listitem>DRM_MODE_CONNECTOR_LVDS</listitem>
1346 <listitem>DRM_MODE_CONNECTOR_Component</listitem>
1347 <listitem>DRM_MODE_CONNECTOR_9PinDIN</listitem>
1348 <listitem>DRM_MODE_CONNECTOR_DisplayPort</listitem>
1349 <listitem>DRM_MODE_CONNECTOR_HDMIA</listitem>
1350 <listitem>DRM_MODE_CONNECTOR_HDMIB</listitem>
1351 <listitem>DRM_MODE_CONNECTOR_TV</listitem>
1352 <listitem>DRM_MODE_CONNECTOR_eDP</listitem>
1353 <listitem>DRM_MODE_CONNECTOR_VIRTUAL</listitem>
1354 </itemizedlist>
1355 </para>
1356 <para>
1357 Connectors must be attached to an encoder to be used. For devices that
1358 map connectors to encoders 1:1, the connector should be attached at
1359 initialization time with a call to
1360 <function>drm_mode_connector_attach_encoder</function>. The driver must
1361 also set the <structname>drm_connector</structname>
1362 <structfield>encoder</structfield> field to point to the attached
1363 encoder.
1364 </para>
1365 <para>
1366 Finally, drivers must initialize the connector state change detection
1367 with a call to <function>drm_kms_helper_poll_init</function>. If at
1368 least one connector is pollable but can't generate hotplug interrupts
1369 (indicated by the DRM_CONNECTOR_POLL_CONNECT and
1370 DRM_CONNECTOR_POLL_DISCONNECT connector flags), a delayed work will
1371 automatically be queued to periodically poll for changes. Connectors
1372 that can generate hotplug interrupts must be marked with the
1373 DRM_CONNECTOR_POLL_HPD flag instead, and their interrupt handler must
1374 call <function>drm_helper_hpd_irq_event</function>. The function will
1375 queue a delayed work to check the state of all connectors, but no
1376 periodic polling will be done.
1377 </para>
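        <para>
          A short sketch of both variants:
        </para>
        <programlisting><![CDATA[
/* Connector without a hotplug interrupt: request periodic polling. */
connector->polled = DRM_CONNECTOR_POLL_CONNECT |
		    DRM_CONNECTOR_POLL_DISCONNECT;

/* Once all connectors are set up, start the poll helper. */
drm_kms_helper_poll_init(dev);

/* From the hotplug interrupt handler of HPD-capable connectors: */
drm_helper_hpd_irq_event(dev);]]></programlisting>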
1378 </sect3>
1379 <sect3>
1380 <title>Connector Operations</title>
1381 <note><para>
1382 Unless otherwise stated, all operations are mandatory.
1383 </para></note>
1384 <sect4>
1385 <title>DPMS</title>
1386 <synopsis>void (*dpms)(struct drm_connector *connector, int mode);</synopsis>
1387 <para>
1388 The DPMS operation sets the power state of a connector. The mode
1389 argument is one of
1390 <itemizedlist>
1391 <listitem><para>DRM_MODE_DPMS_ON</para></listitem>
1392 <listitem><para>DRM_MODE_DPMS_STANDBY</para></listitem>
1393 <listitem><para>DRM_MODE_DPMS_SUSPEND</para></listitem>
1394 <listitem><para>DRM_MODE_DPMS_OFF</para></listitem>
1395 </itemizedlist>
1396 </para>
1397 <para>
1398 In all but DPMS_ON mode the encoder to which the connector is attached
1399 should put the display in low-power mode by driving its signals
1400 appropriately. If more than one connector is attached to the encoder
1401 care should be taken not to change the power state of other displays as
1402 a side effect. Low-power mode should be propagated to the encoders and
1403 CRTCs when all related connectors are put in low-power mode.
1404 </para>
1405 </sect4>
1406 <sect4>
1407 <title>Modes</title>
1408 <synopsis>int (*fill_modes)(struct drm_connector *connector, uint32_t max_width,
1409 uint32_t max_height);</synopsis>
1410 <para>
1411 Fill the mode list with all supported modes for the connector. If the
1412 <parameter>max_width</parameter> and <parameter>max_height</parameter>
1413 arguments are non-zero, the implementation must ignore all modes wider
1414 than <parameter>max_width</parameter> or taller than
1415 <parameter>max_height</parameter>.
1416 </para>
1417 <para>
1418 In this operation the connector must also fill its
1419 <structfield>display_info</structfield>
1420 <structfield>width_mm</structfield> and
1421 <structfield>height_mm</structfield> fields with the connected display
1422 physical size in millimeters. The fields should be set to 0 if the value
1423 isn't known or is not applicable (for instance for projector devices).
1424 </para>
1425 </sect4>
1426 <sect4>
1427 <title>Connection Status</title>
1428 <para>
1429 The connection status is updated through polling or hotplug events when
1430 supported (see <xref linkend="drm-kms-connector-polled"/>). The status
1431 value is reported to userspace through ioctls and must not be used
1432 inside the driver, as it only gets initialized by a call to
1433 <function>drm_mode_getconnector</function> from userspace.
1434 </para>
1435 <synopsis>enum drm_connector_status (*detect)(struct drm_connector *connector,
1436 bool force);</synopsis>
1437 <para>
1438 Check to see if anything is attached to the connector. The
1439 <parameter>force</parameter> parameter is set to false whilst polling or
1440 to true when checking the connector due to user request.
1441 <parameter>force</parameter> can be used by the driver to avoid
1442 expensive, destructive operations during automated probing.
1443 </para>
1444 <para>
1445 Return connector_status_connected if something is connected to the
1446 connector, connector_status_disconnected if nothing is connected and
1447 connector_status_unknown if the connection state isn't known.
1448 </para>
1449 <para>
1450 Drivers should only return connector_status_connected if the connection
1451 status has really been probed as connected. Connectors that can't detect
1452 the connection status, or failed connection status probes, should return
1453 connector_status_unknown.
1454 </para>
1455 </sect4>
1456 </sect3>
1457 </sect2>
1458 <sect2>
1459 <title>Cleanup</title>
1460 <para>
1461 The DRM core manages its objects' lifetime. When an object is not needed
1462 anymore the core calls its destroy function, which must clean up and
1463 free every resource allocated for the object. Every
1464 <function>drm_*_init</function> call must be matched with a
1465 corresponding <function>drm_*_cleanup</function> call to cleanup CRTCs
1466 (<function>drm_crtc_cleanup</function>), planes
1467 (<function>drm_plane_cleanup</function>), encoders
1468 (<function>drm_encoder_cleanup</function>) and connectors
1469 (<function>drm_connector_cleanup</function>). Furthermore, connectors
1470 that have been added to sysfs must be removed by a call to
1471 <function>drm_connector_unregister</function> before calling
1472 <function>drm_connector_cleanup</function>.
1473 </para>
1474 <para>
1475 Connector state change detection must be cleaned up with a call to
1476 <function>drm_kms_helper_poll_fini</function>.
1477 </para>
1478 </sect2>
1479 <sect2>
1480 <title>Output discovery and initialization example</title>
1481 <programlisting><![CDATA[
1482void intel_crt_init(struct drm_device *dev)
1483{
1484 struct drm_connector *connector;
1485 struct intel_output *intel_output;
1486
1487 intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
1488 if (!intel_output)
1489 return;
1490
1491 connector = &intel_output->base;
1492 drm_connector_init(dev, &intel_output->base,
1493 &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
1494
1495 drm_encoder_init(dev, &intel_output->enc, &intel_crt_enc_funcs,
1496 DRM_MODE_ENCODER_DAC);
1497
1498 drm_mode_connector_attach_encoder(&intel_output->base,
1499 &intel_output->enc);
1500
1501 /* Set up the DDC bus. */
1502 intel_output->ddc_bus = intel_i2c_create(dev, GPIOA, "CRTDDC_A");
1503 if (!intel_output->ddc_bus) {
1504 dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
1505 "failed.\n");
1506 return;
1507 }
1508
1509 intel_output->type = INTEL_OUTPUT_ANALOG;
1510 connector->interlace_allowed = 0;
1511 connector->doublescan_allowed = 0;
1512
1513 drm_encoder_helper_add(&intel_output->enc, &intel_crt_helper_funcs);
1514 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
1515
1516 drm_connector_register(connector);
1517}]]></programlisting>
1518 <para>
1519 In the example above (taken from the i915 driver), a CRTC, connector and
1520 encoder combination is created. A device-specific i2c bus is also
1521 created for fetching EDID data and performing monitor detection. Once
1522 the process is complete, the new connector is registered with sysfs to
1523 make its properties available to applications.
1524 </para>
1525 </sect2>
1526 <sect2>
1527 <title>KMS API Functions</title>
1528!Edrivers/gpu/drm/drm_crtc.c
1529 </sect2>
1530 <sect2>
1531 <title>KMS Data Structures</title>
1532!Iinclude/drm/drm_crtc.h
1533 </sect2>
1534 <sect2>
1535 <title>KMS Locking</title>
1536!Pdrivers/gpu/drm/drm_modeset_lock.c kms locking
1537!Iinclude/drm/drm_modeset_lock.h
1538!Edrivers/gpu/drm/drm_modeset_lock.c
1539 </sect2>
1540 </sect1>
1541
1542 <!-- Internals: kms helper functions -->
1543
1544 <sect1>
1545 <title>Mode Setting Helper Functions</title>
1546 <para>
1547 The plane, CRTC, encoder and connector functions provided by the drivers
1548 implement the DRM API. They're called by the DRM core and ioctl handlers
1549 to handle device state changes and configuration requests. As implementing
1550 those functions often requires logic not specific to drivers, mid-layer
1551 helper functions are available to avoid duplicating boilerplate code.
1552 </para>
1553 <para>
1554 The DRM core contains one mid-layer implementation. The mid-layer provides
1555 implementations of several plane, CRTC, encoder and connector functions
1556 (called from the top of the mid-layer) that pre-process requests and call
1557 lower-level functions provided by the driver (at the bottom of the
1558 mid-layer). For instance, the
1559 <function>drm_crtc_helper_set_config</function> function can be used to
1560 fill the struct <structname>drm_crtc_funcs</structname>
1561 <structfield>set_config</structfield> field. When called, it will split
1562 the <methodname>set_config</methodname> operation into smaller, simpler
1563 operations and call the driver to handle them.
1564 </para>
1565 <para>
1566 To use the mid-layer, drivers call <function>drm_crtc_helper_add</function>,
1567 <function>drm_encoder_helper_add</function> and
1568 <function>drm_connector_helper_add</function> functions to install their
1569 mid-layer bottom operations handlers, and fill the
1570 <structname>drm_crtc_funcs</structname>,
1571 <structname>drm_encoder_funcs</structname> and
1572 <structname>drm_connector_funcs</structname> structures with pointers to
1573 the mid-layer top API functions. Installing the mid-layer bottom operation
1574 handlers is best done right after registering the corresponding KMS object.
1575 </para>
1576 <para>
1577 The mid-layer is not split between CRTC, encoder and connector operations.
1578 To use it, a driver must provide bottom functions for all of the three KMS
1579 entities.
1580 </para>
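    <para>
      A sketch of the CRTC part of this wiring; the
      <structname>foo_crtc_helper_funcs</structname> bottom operations are
      assumed to be implemented by the driver:
    </para>
    <programlisting><![CDATA[
static const struct drm_crtc_funcs foo_crtc_funcs = {
	/* Mid-layer top operation provided by the helper library. */
	.set_config = drm_crtc_helper_set_config,
	/* Note: drm_crtc_cleanup() does not free the embedding structure. */
	.destroy = drm_crtc_cleanup,
};

static int foo_crtc_init(struct drm_device *dev, struct foo_crtc *foo)
{
	int ret;

	ret = drm_crtc_init(dev, &foo->base, &foo_crtc_funcs);
	if (ret)
		return ret;

	/* Install the bottom operations right after registering the CRTC. */
	drm_crtc_helper_add(&foo->base, &foo_crtc_helper_funcs);
	return 0;
}]]></programlisting>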
1581 <sect2>
1582 <title>Atomic Modeset Helper Functions Reference</title>
1583 <sect3>
1584 <title>Overview</title>
1585!Pdrivers/gpu/drm/drm_atomic_helper.c overview
1586 </sect3>
1587 <sect3>
1588 <title>Implementing Asynchronous Atomic Commit</title>
1589!Pdrivers/gpu/drm/drm_atomic_helper.c implementing async commit
1590 </sect3>
1591 <sect3>
1592 <title>Atomic State Reset and Initialization</title>
1593!Pdrivers/gpu/drm/drm_atomic_helper.c atomic state reset and initialization
1594 </sect3>
1595!Iinclude/drm/drm_atomic_helper.h
1596!Edrivers/gpu/drm/drm_atomic_helper.c
1597 </sect2>
1598 <sect2>
1599 <title>Modeset Helper Reference for Common Vtables</title>
1600!Iinclude/drm/drm_modeset_helper_vtables.h
1601!Pinclude/drm/drm_modeset_helper_vtables.h overview
1602 </sect2>
1603 <sect2>
1604 <title>Legacy CRTC/Modeset Helper Functions Reference</title>
1605!Edrivers/gpu/drm/drm_crtc_helper.c
1606!Pdrivers/gpu/drm/drm_crtc_helper.c overview
1607 </sect2>
1608 <sect2>
1609 <title>Output Probing Helper Functions Reference</title>
1610!Pdrivers/gpu/drm/drm_probe_helper.c output probing helper overview
1611!Edrivers/gpu/drm/drm_probe_helper.c
1612 </sect2>
1613 <sect2>
1614 <title>fbdev Helper Functions Reference</title>
1615!Pdrivers/gpu/drm/drm_fb_helper.c fbdev helpers
1616!Edrivers/gpu/drm/drm_fb_helper.c
1617!Iinclude/drm/drm_fb_helper.h
1618 </sect2>
1619 <sect2>
1620 <title>Framebuffer CMA Helper Functions Reference</title>
1621!Pdrivers/gpu/drm/drm_fb_cma_helper.c framebuffer cma helper functions
1622!Edrivers/gpu/drm/drm_fb_cma_helper.c
1623 </sect2>
1624 <sect2>
1625 <title>Display Port Helper Functions Reference</title>
1626!Pdrivers/gpu/drm/drm_dp_helper.c dp helpers
1627!Iinclude/drm/drm_dp_helper.h
1628!Edrivers/gpu/drm/drm_dp_helper.c
1629 </sect2>
1630 <sect2>
1631 <title>Display Port Dual Mode Adaptor Helper Functions Reference</title>
1632!Pdrivers/gpu/drm/drm_dp_dual_mode_helper.c dp dual mode helpers
1633!Iinclude/drm/drm_dp_dual_mode_helper.h
1634!Edrivers/gpu/drm/drm_dp_dual_mode_helper.c
1635 </sect2>
1636 <sect2>
1637 <title>Display Port MST Helper Functions Reference</title>
1638!Pdrivers/gpu/drm/drm_dp_mst_topology.c dp mst helper
1639!Iinclude/drm/drm_dp_mst_helper.h
1640!Edrivers/gpu/drm/drm_dp_mst_topology.c
1641 </sect2>
1642 <sect2>
1643 <title>MIPI DSI Helper Functions Reference</title>
1644!Pdrivers/gpu/drm/drm_mipi_dsi.c dsi helpers
1645!Iinclude/drm/drm_mipi_dsi.h
1646!Edrivers/gpu/drm/drm_mipi_dsi.c
1647 </sect2>
1648 <sect2>
1649 <title>EDID Helper Functions Reference</title>
1650!Edrivers/gpu/drm/drm_edid.c
1651 </sect2>
1652 <sect2>
1653 <title>Rectangle Utilities Reference</title>
1654!Pinclude/drm/drm_rect.h rect utils
1655!Iinclude/drm/drm_rect.h
1656!Edrivers/gpu/drm/drm_rect.c
1657 </sect2>
1658 <sect2>
1659 <title>Flip-work Helper Reference</title>
1660!Pinclude/drm/drm_flip_work.h flip utils
1661!Iinclude/drm/drm_flip_work.h
1662!Edrivers/gpu/drm/drm_flip_work.c
1663 </sect2>
1664 <sect2>
1665 <title>HDMI Infoframes Helper Reference</title>
1666 <para>
1667 Strictly speaking this is not a DRM helper library but is generally usable
1668 by any driver interfacing with HDMI outputs, such as v4l or alsa drivers.
1669 But it nicely fits into the overall topic of mode setting helper
1670 libraries and hence is also included here.
1671 </para>
1672!Iinclude/linux/hdmi.h
1673!Edrivers/video/hdmi.c
1674 </sect2>
1675 <sect2>
1676 <title id="drm-kms-planehelpers">Plane Helper Reference</title>
1677!Edrivers/gpu/drm/drm_plane_helper.c
1678!Pdrivers/gpu/drm/drm_plane_helper.c overview
1679 </sect2>
1680 <sect2>
1681 <title>Tile group</title>
1682!Pdrivers/gpu/drm/drm_crtc.c Tile group
1683 </sect2>
1684 <sect2>
1685 <title>Bridges</title>
1686 <sect3>
1687 <title>Overview</title>
1688!Pdrivers/gpu/drm/drm_bridge.c overview
1689 </sect3>
1690 <sect3>
1691 <title>Default bridge callback sequence</title>
1692!Pdrivers/gpu/drm/drm_bridge.c bridge callbacks
1693 </sect3>
1694!Edrivers/gpu/drm/drm_bridge.c
1695 </sect2>
1696 <sect2>
1697 <title>Panel Helper Reference</title>
1698!Iinclude/drm/drm_panel.h
1699!Edrivers/gpu/drm/drm_panel.c
1700!Pdrivers/gpu/drm/drm_panel.c drm panel
1701 </sect2>
1702 </sect1>
1703
1704 <!-- Internals: kms properties -->
1705
1706 <sect1 id="drm-kms-properties">
1707 <title>KMS Properties</title>
1708 <para>
1709 Drivers may need to expose additional parameters to applications beyond
1710 those described in the previous sections. KMS supports attaching
1711 properties to CRTCs, connectors and planes and offers a userspace API to
1712 list, get and set the property values.
1713 </para>
1714 <para>
1715 Properties are identified by a name that uniquely defines the property
1716 purpose, and store an associated value. For all property types except blob
1717 properties the value is a 64-bit unsigned integer.
1718 </para>
1719 <para>
1720 KMS differentiates between properties and property instances. Drivers
1721 first create properties and then create and associate individual instances
1722 of those properties to objects. A property can be instantiated multiple
1723 times and associated with different objects. Values are stored in property
1724 instances, and all other property information is stored in the property
1725 and shared between all instances of the property.
1726 </para>
1727 <para>
1728 Every property is created with a type that influences how the KMS core
1729 handles the property. Supported property types are
1730 <variablelist>
1731 <varlistentry>
1732 <term>DRM_MODE_PROP_RANGE</term>
1733 <listitem><para>Range properties report their minimum and maximum
1734 admissible values. The KMS core verifies that values set by
1735 applications fit in that range.</para></listitem>
1736 </varlistentry>
1737 <varlistentry>
1738 <term>DRM_MODE_PROP_ENUM</term>
1739 <listitem><para>Enumerated properties take a numerical value that
1740 ranges from 0 to the number of enumerated values defined by the
1741 property minus one, and associate a free-formed string name to each
1742 value. Applications can retrieve the list of defined value-name pairs
1743 and use the numerical value to get and set property instance values.
1744 </para></listitem>
1745 </varlistentry>
1746 <varlistentry>
1747 <term>DRM_MODE_PROP_BITMASK</term>
1748 <listitem><para>Bitmask properties are enumeration properties that
1749 additionally restrict all enumerated values to the 0..63 range.
1750 Bitmask property instance values combine one or more of the
1751 enumerated bits defined by the property.</para></listitem>
1752 </varlistentry>
1753 <varlistentry>
1754 <term>DRM_MODE_PROP_BLOB</term>
1755 <listitem><para>Blob properties store a binary blob without any format
1756 restriction. The binary blobs are created as KMS standalone objects,
1757 and blob property instance values store the ID of their associated
1758 blob object.</para>
1759 <para>Blob properties are only used for the connector EDID property
1760 and cannot be created by drivers.</para></listitem>
1761 </varlistentry>
1762 </variablelist>
1763 </para>
1764 <para>
1765 To create a property drivers call one of the following functions depending
1766 on the property type. All property creation functions take property flags
1767 and name, as well as type-specific arguments.
1768 <itemizedlist>
1769 <listitem>
1770 <synopsis>struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
1771 const char *name,
1772 uint64_t min, uint64_t max);</synopsis>
1773 <para>Create a range property with the given minimum and maximum
1774 values.</para>
1775 </listitem>
1776 <listitem>
1777 <synopsis>struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
1778 const char *name,
1779 const struct drm_prop_enum_list *props,
1780 int num_values);</synopsis>
1781 <para>Create an enumerated property. The <parameter>props</parameter>
1782 argument points to an array of <parameter>num_values</parameter>
1783 value-name pairs.</para>
1784 </listitem>
1785 <listitem>
1786 <synopsis>struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
1787 int flags, const char *name,
1788 const struct drm_prop_enum_list *props,
1789 int num_values);</synopsis>
1790 <para>Create a bitmask property. The <parameter>props</parameter>
1791 argument points to an array of <parameter>num_values</parameter>
1792 value-name pairs.</para>
1793 </listitem>
1794 </itemizedlist>
1795 </para>
1796 <para>
1797 Properties can additionally be created as immutable, in which case they
1798 will be read-only for applications but can be modified by the driver. To
1799 create an immutable property drivers must set the DRM_MODE_PROP_IMMUTABLE
1800 flag at property creation time.
1801 </para>
1802 <para>
1803 When no array of value-name pairs is readily available at property
1804 creation time for enumerated or range properties, drivers can create
1805 the property using the <function>drm_property_create</function> function
1806 and manually add enumeration value-name pairs by calling the
1807 <function>drm_property_add_enum</function> function. Care must be taken to
1808 properly specify the property type through the <parameter>flags</parameter>
1809 argument.
1810 </para>
1811 <para>
1812 After creating properties drivers can attach property instances to CRTC,
1813 connector and plane objects by calling the
1814 <function>drm_object_attach_property</function>. The function takes a
1815 pointer to the target object, a pointer to the previously created property
1816 and an initial instance value.
1817 </para>
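    <para>
      A sketch of the complete sequence for a purely hypothetical
      "foo mode" enumerated property attached to a connector:
    </para>
    <programlisting><![CDATA[
static const struct drm_prop_enum_list foo_mode_list[] = {
	{ 0, "off" },
	{ 1, "on" },
};

static int foo_create_properties(struct drm_device *dev,
				 struct drm_connector *connector)
{
	struct drm_property *prop;

	prop = drm_property_create_enum(dev, 0, "foo mode",
					foo_mode_list,
					ARRAY_SIZE(foo_mode_list));
	if (!prop)
		return -ENOMEM;

	/* Attach an instance to the connector with an initial value of 0. */
	drm_object_attach_property(&connector->base, prop, 0);
	return 0;
}]]></programlisting>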
1818 <sect2>
1819 <title>Existing KMS Properties</title>
1820 <para>
1821 The following table gives a description of the DRM properties exposed by
1822 various modules/drivers.
1823 </para>
1824 <table border="1" cellpadding="0" cellspacing="0">
1825 <tbody>
1826 <tr style="font-weight: bold;">
1827 <td valign="top" >Owner Module/Drivers</td>
1828 <td valign="top" >Group</td>
1829 <td valign="top" >Property Name</td>
1830 <td valign="top" >Type</td>
1831 <td valign="top" >Property Values</td>
1832 <td valign="top" >Object attached</td>
1833 <td valign="top" >Description/Restrictions</td>
1834 </tr>
1835 <tr>
1836 <td rowspan="42" valign="top" >DRM</td>
1837 <td rowspan="2" valign="top" >Generic</td>
1838 <td valign="top" >“rotation”</td>
1839 <td valign="top" >BITMASK</td>
1840 <td valign="top" >{ 0, "rotate-0" },
1841 { 1, "rotate-90" },
1842 { 2, "rotate-180" },
1843 { 3, "rotate-270" },
1844 { 4, "reflect-x" },
1845 { 5, "reflect-y" }</td>
1846 <td valign="top" >CRTC, Plane</td>
1847 <td valign="top" >rotate-(degrees) rotates the image by the specified amount in degrees
1848 in the counterclockwise direction. reflect-x and reflect-y reflect the
1849 image along the specified axis prior to rotation.</td>
1850 </tr>
1851 <tr>
1852 <td valign="top" >“scaling mode”</td>
1853 <td valign="top" >ENUM</td>
1854 <td valign="top" >{ "None", "Full", "Center", "Full aspect" }</td>
1855 <td valign="top" >Connector</td>
1856 <td valign="top" >Supported by: amdgpu, gma500, i915, nouveau and radeon.</td>
1857 </tr>
1858 <tr>
1859 <td rowspan="5" valign="top" >Connector</td>
1860 <td valign="top" >“EDID”</td>
1861 <td valign="top" >BLOB | IMMUTABLE</td>
1862 <td valign="top" >0</td>
1863 <td valign="top" >Connector</td>
1864 <td valign="top" >Contains id of edid blob ptr object.</td>
1865 </tr>
1866 <tr>
1867 <td valign="top" >“DPMS”</td>
1868 <td valign="top" >ENUM</td>
1869 <td valign="top" >{ “On”, “Standby”, “Suspend”, “Off” }</td>
1870 <td valign="top" >Connector</td>
1871 <td valign="top" >Contains DPMS operation mode value.</td>
1872 </tr>
1873 <tr>
1874 <td valign="top" >“PATH”</td>
1875 <td valign="top" >BLOB | IMMUTABLE</td>
1876 <td valign="top" >0</td>
1877 <td valign="top" >Connector</td>
1878 <td valign="top" >Contains topology path to a connector.</td>
1879 </tr>
1880 <tr>
1881 <td valign="top" >“TILE”</td>
1882 <td valign="top" >BLOB | IMMUTABLE</td>
1883 <td valign="top" >0</td>
1884 <td valign="top" >Connector</td>
1885 <td valign="top" >Contains tiling information for a connector.</td>
1886 </tr>
1887 <tr>
1888 <td valign="top" >“CRTC_ID”</td>
1889 <td valign="top" >OBJECT</td>
1890 <td valign="top" >DRM_MODE_OBJECT_CRTC</td>
1891 <td valign="top" >Connector</td>
1892 <td valign="top" >CRTC that connector is attached to (atomic)</td>
1893 </tr>
1894 <tr>
1895 <td rowspan="11" valign="top" >Plane</td>
1896 <td valign="top" >“type”</td>
1897 <td valign="top" >ENUM | IMMUTABLE</td>
1898 <td valign="top" >{ "Overlay", "Primary", "Cursor" }</td>
1899 <td valign="top" >Plane</td>
1900 <td valign="top" >Plane type</td>
1901 </tr>
1902 <tr>
1903 <td valign="top" >“SRC_X”</td>
1904 <td valign="top" >RANGE</td>
1905 <td valign="top" >Min=0, Max=UINT_MAX</td>
1906 <td valign="top" >Plane</td>
1907 <td valign="top" >Scanout source x coordinate in 16.16 fixed point (atomic)</td>
1908 </tr>
1909 <tr>
1910 <td valign="top" >“SRC_Y”</td>
1911 <td valign="top" >RANGE</td>
1912 <td valign="top" >Min=0, Max=UINT_MAX</td>
1913 <td valign="top" >Plane</td>
1914 <td valign="top" >Scanout source y coordinate in 16.16 fixed point (atomic)</td>
1915 </tr>
1916 <tr>
1917 <td valign="top" >“SRC_W”</td>
1918 <td valign="top" >RANGE</td>
1919 <td valign="top" >Min=0, Max=UINT_MAX</td>
1920 <td valign="top" >Plane</td>
1921 <td valign="top" >Scanout source width in 16.16 fixed point (atomic)</td>
1922 </tr>
1923 <tr>
1924 <td valign="top" >“SRC_H”</td>
1925 <td valign="top" >RANGE</td>
1926 <td valign="top" >Min=0, Max=UINT_MAX</td>
1927 <td valign="top" >Plane</td>
1928 <td valign="top" >Scanout source height in 16.16 fixed point (atomic)</td>
1929 </tr>
1930 <tr>
1931 <td valign="top" >“CRTC_X”</td>
1932 <td valign="top" >SIGNED_RANGE</td>
1933 <td valign="top" >Min=INT_MIN, Max=INT_MAX</td>
1934 <td valign="top" >Plane</td>
1935 <td valign="top" >Scanout CRTC (destination) x coordinate (atomic)</td>
1936 </tr>
1937 <tr>
1938 <td valign="top" >“CRTC_Y”</td>
1939 <td valign="top" >SIGNED_RANGE</td>
1940 <td valign="top" >Min=INT_MIN, Max=INT_MAX</td>
1941 <td valign="top" >Plane</td>
1942 <td valign="top" >Scanout CRTC (destination) y coordinate (atomic)</td>
1943 </tr>
1944 <tr>
1945 <td valign="top" >“CRTC_W”</td>
1946 <td valign="top" >RANGE</td>
1947 <td valign="top" >Min=0, Max=UINT_MAX</td>
1948 <td valign="top" >Plane</td>
1949 <td valign="top" >Scanout CRTC (destination) width (atomic)</td>
1950 </tr>
1951 <tr>
1952 <td valign="top" >“CRTC_H”</td>
1953 <td valign="top" >RANGE</td>
1954 <td valign="top" >Min=0, Max=UINT_MAX</td>
1955 <td valign="top" >Plane</td>
1956 <td valign="top" >Scanout CRTC (destination) height (atomic)</td>
1957 </tr>
1958 <tr>
1959 <td valign="top" >“FB_ID”</td>
1960 <td valign="top" >OBJECT</td>
1961 <td valign="top" >DRM_MODE_OBJECT_FB</td>
1962 <td valign="top" >Plane</td>
1963 <td valign="top" >Scanout framebuffer (atomic)</td>
1964 </tr>
1965 <tr>
1966 <td valign="top" >“CRTC_ID”</td>
1967 <td valign="top" >OBJECT</td>
1968 <td valign="top" >DRM_MODE_OBJECT_CRTC</td>
1969 <td valign="top" >Plane</td>
1970 <td valign="top" >CRTC that plane is attached to (atomic)</td>
1971 </tr>
1972 <tr>
1973 <td rowspan="2" valign="top" >DVI-I</td>
1974 <td valign="top" >“subconnector”</td>
1975 <td valign="top" >ENUM</td>
1976 <td valign="top" >{ “Unknown”, “DVI-D”, “DVI-A” }</td>
1977 <td valign="top" >Connector</td>
1978 <td valign="top" >TBD</td>
1979 </tr>
1980 <tr>
1981 <td valign="top" >“select subconnector”</td>
1982 <td valign="top" >ENUM</td>
1983 <td valign="top" >{ “Automatic”, “DVI-D”, “DVI-A” }</td>
1984 <td valign="top" >Connector</td>
1985 <td valign="top" >TBD</td>
1986 </tr>
1987 <tr>
1988 <td rowspan="13" valign="top" >TV</td>
1989 <td valign="top" >“subconnector”</td>
1990 <td valign="top" >ENUM</td>
1991 <td valign="top" >{ "Unknown", "Composite", "SVIDEO", "Component", "SCART" }</td>
1992 <td valign="top" >Connector</td>
1993 <td valign="top" >TBD</td>
1994 </tr>
1995 <tr>
1996 <td valign="top" >“select subconnector”</td>
1997 <td valign="top" >ENUM</td>
1998 <td valign="top" >{ "Automatic", "Composite", "SVIDEO", "Component", "SCART" }</td>
1999 <td valign="top" >Connector</td>
2000 <td valign="top" >TBD</td>
2001 </tr>
2002 <tr>
2003 <td valign="top" >“mode”</td>
2004 <td valign="top" >ENUM</td>
2005 <td valign="top" >{ "NTSC_M", "NTSC_J", "NTSC_443", "PAL_B" } etc.</td>
2006 <td valign="top" >Connector</td>
2007 <td valign="top" >TBD</td>
2008 </tr>
2009 <tr>
2010 <td valign="top" >“left margin”</td>
2011 <td valign="top" >RANGE</td>
2012 <td valign="top" >Min=0, Max=100</td>
2013 <td valign="top" >Connector</td>
2014 <td valign="top" >TBD</td>
2015 </tr>
2016 <tr>
2017 <td valign="top" >“right margin”</td>
2018 <td valign="top" >RANGE</td>
2019 <td valign="top" >Min=0, Max=100</td>
2020 <td valign="top" >Connector</td>
2021 <td valign="top" >TBD</td>
2022 </tr>
2023 <tr>
2024 <td valign="top" >“top margin”</td>
2025 <td valign="top" >RANGE</td>
2026 <td valign="top" >Min=0, Max=100</td>
2027 <td valign="top" >Connector</td>
2028 <td valign="top" >TBD</td>
2029 </tr>
2030 <tr>
2031 <td valign="top" >“bottom margin”</td>
2032 <td valign="top" >RANGE</td>
2033 <td valign="top" >Min=0, Max=100</td>
2034 <td valign="top" >Connector</td>
2035 <td valign="top" >TBD</td>
2036 </tr>
2037 <tr>
2038 <td valign="top" >“brightness”</td>
2039 <td valign="top" >RANGE</td>
2040 <td valign="top" >Min=0, Max=100</td>
2041 <td valign="top" >Connector</td>
2042 <td valign="top" >TBD</td>
2043 </tr>
2044 <tr>
2045 <td valign="top" >“contrast”</td>
2046 <td valign="top" >RANGE</td>
2047 <td valign="top" >Min=0, Max=100</td>
2048 <td valign="top" >Connector</td>
2049 <td valign="top" >TBD</td>
2050 </tr>
2051 <tr>
2052 <td valign="top" >“flicker reduction”</td>
2053 <td valign="top" >RANGE</td>
2054 <td valign="top" >Min=0, Max=100</td>
2055 <td valign="top" >Connector</td>
2056 <td valign="top" >TBD</td>
2057 </tr>
2058 <tr>
2059 <td valign="top" >“overscan”</td>
2060 <td valign="top" >RANGE</td>
2061 <td valign="top" >Min=0, Max=100</td>
2062 <td valign="top" >Connector</td>
2063 <td valign="top" >TBD</td>
2064 </tr>
2065 <tr>
2066 <td valign="top" >“saturation”</td>
2067 <td valign="top" >RANGE</td>
2068 <td valign="top" >Min=0, Max=100</td>
2069 <td valign="top" >Connector</td>
2070 <td valign="top" >TBD</td>
2071 </tr>
2072 <tr>
2073 <td valign="top" >“hue”</td>
2074 <td valign="top" >RANGE</td>
2075 <td valign="top" >Min=0, Max=100</td>
2076 <td valign="top" >Connector</td>
2077 <td valign="top" >TBD</td>
2078 </tr>
2079 <tr>
2080 <td rowspan="2" valign="top" >Virtual GPU</td>
2081 <td valign="top" >“suggested X”</td>
2082 <td valign="top" >RANGE</td>
2083 <td valign="top" >Min=0, Max=0xffffffff</td>
2084 <td valign="top" >Connector</td>
2085 <td valign="top" >property to suggest an X offset for a connector</td>
2086 </tr>
2087 <tr>
2088 <td valign="top" >“suggested Y”</td>
2089 <td valign="top" >RANGE</td>
2090 <td valign="top" >Min=0, Max=0xffffffff</td>
2091 <td valign="top" >Connector</td>
2092 <td valign="top" >property to suggest a Y offset for a connector</td>
2093 </tr>
2094 <tr>
2095 <td rowspan="7" valign="top" >Optional</td>
2096 <td valign="top" >"aspect ratio"</td>
2097 <td valign="top" >ENUM</td>
2098 <td valign="top" >{ "None", "4:3", "16:9" }</td>
2099 <td valign="top" >Connector</td>
2100 <td valign="top" >TBD</td>
2101 </tr>
2102 <tr>
2103 <td valign="top" >“dirty”</td>
2104 <td valign="top" >ENUM | IMMUTABLE</td>
2105 <td valign="top" >{ "Off", "On", "Annotate" }</td>
2106 <td valign="top" >Connector</td>
2107 <td valign="top" >TBD</td>
2108 </tr>
2109 <tr>
2110 <td valign="top" >“DEGAMMA_LUT”</td>
2111 <td valign="top" >BLOB</td>
2112 <td valign="top" >0</td>
2113 <td valign="top" >CRTC</td>
2114 <td valign="top" >DRM property to set the degamma lookup table
2115 (LUT) mapping pixel data from the framebuffer before it is
2116 given to the transformation matrix. The data is interpreted
2117 as an array of struct drm_color_lut elements. Hardware might
2118 choose not to use the full precision of the LUT elements nor
2119 use all the elements of the LUT (for example the hardware
2120 might choose to interpolate between LUT[0] and LUT[4]). </td>
2121 </tr>
2122 <tr>
2123 <td valign="top" >"DEGAMMA_LUT_SIZE"</td>
2124 <td valign="top" >RANGE | IMMUTABLE</td>
2125 <td valign="top" >Min=0, Max=UINT_MAX</td>
2126 <td valign="top" >CRTC</td>
2127 <td valign="top" >DRM property that gives the size of the lookup
2128 table to be set on the DEGAMMA_LUT property (the size depends
2129 on the underlying hardware).</td>
2130 </tr>
2131 <tr>
2132 <td valign="top" >"CTM"</td>
2133 <td valign="top" >BLOB</td>
2134 <td valign="top" >0</td>
2135 <td valign="top" >CRTC</td>
2136 <td valign="top" >DRM property to set the current
2137 transformation matrix (CTM) applied to pixel data after the
2138 lookup through the degamma LUT and before the lookup through
2139 the gamma LUT. The data is interpreted as a struct
2140 drm_color_ctm.</td>
2141 </tr>
2142 <tr>
2143 <td valign="top" >"GAMMA_LUT"</td>
2144 <td valign="top" >BLOB</td>
2145 <td valign="top" >0</td>
2146 <td valign="top" >CRTC</td>
2147 <td valign="top" >DRM property to set the gamma lookup table
2148 (LUT) mapping pixel data after the transformation matrix to
2149 data sent to the connector. The data is interpreted as an
2150 array of struct drm_color_lut elements. Hardware might choose
2151 not to use the full precision of the LUT elements nor use all
2152 the elements of the LUT (for example the hardware might choose
2153 to interpolate between LUT[0] and LUT[4]).</td>
2154 </tr>
2155 <tr>
2156 <td valign="top" >"GAMMA_LUT_SIZE"</td>
2157 <td valign="top" >RANGE | IMMUTABLE</td>
2158 <td valign="top" >Min=0, Max=UINT_MAX</td>
2159 <td valign="top" >CRTC</td>
2160 <td valign="top" >DRM property that gives the size of the lookup
2161 table to be set on the GAMMA_LUT property (the size depends on
2162 the underlying hardware).</td>
2163 </tr>
2164 <tr>
2165 <td rowspan="20" valign="top" >i915</td>
2166 <td rowspan="2" valign="top" >Generic</td>
2167 <td valign="top" >"Broadcast RGB"</td>
2168 <td valign="top" >ENUM</td>
2169 <td valign="top" >{ "Automatic", "Full", "Limited 16:235" }</td>
2170 <td valign="top" >Connector</td>
2171 <td valign="top" >When this property is set to Limited 16:235
2172 and CTM is set, the hardware will be programmed with the
2173 result of the multiplication of CTM by the limited range
2174 matrix to ensure the pixels normally in the range 0..1.0 are
2175 remapped to the range 16/255..235/255.</td>
2176 </tr>
2177 <tr>
2178 <td valign="top" >"audio"</td>
2179 <td valign="top" >ENUM</td>
2180 <td valign="top" >{ "force-dvi", "off", "auto", "on" }</td>
2181 <td valign="top" >Connector</td>
2182 <td valign="top" >TBD</td>
2183 </tr>
2184 <tr>
2185 <td rowspan="17" valign="top" >SDVO-TV</td>
2187 <td valign="top" >"mode"</td>
2187 <td valign="top" >ENUM</td>
2188 <td valign="top" >{ "NTSC_M", "NTSC_J", "NTSC_443", "PAL_B" } etc.</td>
2189 <td valign="top" >Connector</td>
2190 <td valign="top" >TBD</td>
2191 </tr>
2192 <tr>
2193 <td valign="top" >"left_margin"</td>
2194 <td valign="top" >RANGE</td>
2195 <td valign="top" >Min=0, Max= SDVO dependent</td>
2196 <td valign="top" >Connector</td>
2197 <td valign="top" >TBD</td>
2198 </tr>
2199 <tr>
2200 <td valign="top" >"right_margin"</td>
2201 <td valign="top" >RANGE</td>
2202 <td valign="top" >Min=0, Max= SDVO dependent</td>
2203 <td valign="top" >Connector</td>
2204 <td valign="top" >TBD</td>
2205 </tr>
2206 <tr>
2207 <td valign="top" >"top_margin"</td>
2208 <td valign="top" >RANGE</td>
2209 <td valign="top" >Min=0, Max= SDVO dependent</td>
2210 <td valign="top" >Connector</td>
2211 <td valign="top" >TBD</td>
2212 </tr>
2213 <tr>
2214 <td valign="top" >"bottom_margin"</td>
2215 <td valign="top" >RANGE</td>
2216 <td valign="top" >Min=0, Max= SDVO dependent</td>
2217 <td valign="top" >Connector</td>
2218 <td valign="top" >TBD</td>
2219 </tr>
2220 <tr>
2221 <td valign="top" >"hpos"</td>
2222 <td valign="top" >RANGE</td>
2223 <td valign="top" >Min=0, Max= SDVO dependent</td>
2224 <td valign="top" >Connector</td>
2225 <td valign="top" >TBD</td>
2226 </tr>
2227 <tr>
2228 <td valign="top" >"vpos"</td>
2229 <td valign="top" >RANGE</td>
2230 <td valign="top" >Min=0, Max= SDVO dependent</td>
2231 <td valign="top" >Connector</td>
2232 <td valign="top" >TBD</td>
2233 </tr>
2234 <tr>
2235 <td valign="top" >"contrast"</td>
2236 <td valign="top" >RANGE</td>
2237 <td valign="top" >Min=0, Max= SDVO dependent</td>
2238 <td valign="top" >Connector</td>
2239 <td valign="top" >TBD</td>
2240 </tr>
2241 <tr>
2242 <td valign="top" >"saturation"</td>
2243 <td valign="top" >RANGE</td>
2244 <td valign="top" >Min=0, Max= SDVO dependent</td>
2245 <td valign="top" >Connector</td>
2246 <td valign="top" >TBD</td>
2247 </tr>
2248 <tr>
2249 <td valign="top" >"hue"</td>
2250 <td valign="top" >RANGE</td>
2251 <td valign="top" >Min=0, Max= SDVO dependent</td>
2252 <td valign="top" >Connector</td>
2253 <td valign="top" >TBD</td>
2254 </tr>
2255 <tr>
2256 <td valign="top" >"sharpness"</td>
2257 <td valign="top" >RANGE</td>
2258 <td valign="top" >Min=0, Max= SDVO dependent</td>
2259 <td valign="top" >Connector</td>
2260 <td valign="top" >TBD</td>
2261 </tr>
2262 <tr>
2263 <td valign="top" >"flicker_filter"</td>
2264 <td valign="top" >RANGE</td>
2265 <td valign="top" >Min=0, Max= SDVO dependent</td>
2266 <td valign="top" >Connector</td>
2267 <td valign="top" >TBD</td>
2268 </tr>
2269 <tr>
2270 <td valign="top" >"flicker_filter_adaptive"</td>
2271 <td valign="top" >RANGE</td>
2272 <td valign="top" >Min=0, Max= SDVO dependent</td>
2273 <td valign="top" >Connector</td>
2274 <td valign="top" >TBD</td>
2275 </tr>
2276 <tr>
2277 <td valign="top" >"flicker_filter_2d"</td>
2278 <td valign="top" >RANGE</td>
2279 <td valign="top" >Min=0, Max= SDVO dependent</td>
2280 <td valign="top" >Connector</td>
2281 <td valign="top" >TBD</td>
2282 </tr>
2283 <tr>
2284 <td valign="top" >"tv_chroma_filter"</td>
2285 <td valign="top" >RANGE</td>
2286 <td valign="top" >Min=0, Max= SDVO dependent</td>
2287 <td valign="top" >Connector</td>
2288 <td valign="top" >TBD</td>
2289 </tr>
2290 <tr>
2291 <td valign="top" >"tv_luma_filter"</td>
2292 <td valign="top" >RANGE</td>
2293 <td valign="top" >Min=0, Max= SDVO dependent</td>
2294 <td valign="top" >Connector</td>
2295 <td valign="top" >TBD</td>
2296 </tr>
2297 <tr>
2298 <td valign="top" >"dot_crawl"</td>
2299 <td valign="top" >RANGE</td>
2300 <td valign="top" >Min=0, Max=1</td>
2301 <td valign="top" >Connector</td>
2302 <td valign="top" >TBD</td>
2303 </tr>
2304 <tr>
2305 <td valign="top" >SDVO-TV/LVDS</td>
2306 <td valign="top" >"brightness"</td>
2307 <td valign="top" >RANGE</td>
2308 <td valign="top" >Min=0, Max= SDVO dependent</td>
2309 <td valign="top" >Connector</td>
2310 <td valign="top" >TBD</td>
2311 </tr>
2312 <tr>
2313 <td rowspan="2" valign="top" >CDV gma-500</td>
2314 <td rowspan="2" valign="top" >Generic</td>
2315 <td valign="top" >"Broadcast RGB"</td>
2316 <td valign="top" >ENUM</td>
2317 <td valign="top" >{ "Full", "Limited 16:235" }</td>
2318 <td valign="top" >Connector</td>
2319 <td valign="top" >TBD</td>
2320 </tr>
2321 <tr>
2322 <td valign="top" >"Broadcast RGB"</td>
2323 <td valign="top" >ENUM</td>
2324 <td valign="top" >{ "off", "auto", "on" }</td>
2325 <td valign="top" >Connector</td>
2326 <td valign="top" >TBD</td>
2327 </tr>
2328 <tr>
2329 <td rowspan="19" valign="top" >Poulsbo</td>
2330 <td rowspan="1" valign="top" >Generic</td>
2331 <td valign="top" >"backlight"</td>
2332 <td valign="top" >RANGE</td>
2333 <td valign="top" >Min=0, Max=100</td>
2334 <td valign="top" >Connector</td>
2335 <td valign="top" >TBD</td>
2336 </tr>
2337 <tr>
2338 <td rowspan="17" valign="top" >SDVO-TV</td>
2339 <td valign="top" >"mode"</td>
2340 <td valign="top" >ENUM</td>
2341 <td valign="top" >{ "NTSC_M", "NTSC_J", "NTSC_443", "PAL_B" } etc.</td>
2342 <td valign="top" >Connector</td>
2343 <td valign="top" >TBD</td>
2344 </tr>
2345 <tr>
2346 <td valign="top" >"left_margin"</td>
2347 <td valign="top" >RANGE</td>
2348 <td valign="top" >Min=0, Max= SDVO dependent</td>
2349 <td valign="top" >Connector</td>
2350 <td valign="top" >TBD</td>
2351 </tr>
2352 <tr>
2353 <td valign="top" >"right_margin"</td>
2354 <td valign="top" >RANGE</td>
2355 <td valign="top" >Min=0, Max= SDVO dependent</td>
2356 <td valign="top" >Connector</td>
2357 <td valign="top" >TBD</td>
2358 </tr>
2359 <tr>
2360 <td valign="top" >"top_margin"</td>
2361 <td valign="top" >RANGE</td>
2362 <td valign="top" >Min=0, Max= SDVO dependent</td>
2363 <td valign="top" >Connector</td>
2364 <td valign="top" >TBD</td>
2365 </tr>
2366 <tr>
2367 <td valign="top" >"bottom_margin"</td>
2368 <td valign="top" >RANGE</td>
2369 <td valign="top" >Min=0, Max= SDVO dependent</td>
2370 <td valign="top" >Connector</td>
2371 <td valign="top" >TBD</td>
2372 </tr>
2373 <tr>
2374 <td valign="top" >"hpos"</td>
2375 <td valign="top" >RANGE</td>
2376 <td valign="top" >Min=0, Max= SDVO dependent</td>
2377 <td valign="top" >Connector</td>
2378 <td valign="top" >TBD</td>
2379 </tr>
2380 <tr>
2381 <td valign="top" >"vpos"</td>
2382 <td valign="top" >RANGE</td>
2383 <td valign="top" >Min=0, Max= SDVO dependent</td>
2384 <td valign="top" >Connector</td>
2385 <td valign="top" >TBD</td>
2386 </tr>
2387 <tr>
2388 <td valign="top" >"contrast"</td>
2389 <td valign="top" >RANGE</td>
2390 <td valign="top" >Min=0, Max= SDVO dependent</td>
2391 <td valign="top" >Connector</td>
2392 <td valign="top" >TBD</td>
2393 </tr>
2394 <tr>
2395 <td valign="top" >"saturation"</td>
2396 <td valign="top" >RANGE</td>
2397 <td valign="top" >Min=0, Max= SDVO dependent</td>
2398 <td valign="top" >Connector</td>
2399 <td valign="top" >TBD</td>
2400 </tr>
2401 <tr>
2402 <td valign="top" >"hue"</td>
2403 <td valign="top" >RANGE</td>
2404 <td valign="top" >Min=0, Max= SDVO dependent</td>
2405 <td valign="top" >Connector</td>
2406 <td valign="top" >TBD</td>
2407 </tr>
2408 <tr>
2409 <td valign="top" >"sharpness"</td>
2410 <td valign="top" >RANGE</td>
2411 <td valign="top" >Min=0, Max= SDVO dependent</td>
2412 <td valign="top" >Connector</td>
2413 <td valign="top" >TBD</td>
2414 </tr>
2415 <tr>
2416 <td valign="top" >"flicker_filter"</td>
2417 <td valign="top" >RANGE</td>
2418 <td valign="top" >Min=0, Max= SDVO dependent</td>
2419 <td valign="top" >Connector</td>
2420 <td valign="top" >TBD</td>
2421 </tr>
2422 <tr>
2423 <td valign="top" >"flicker_filter_adaptive"</td>
2424 <td valign="top" >RANGE</td>
2425 <td valign="top" >Min=0, Max= SDVO dependent</td>
2426 <td valign="top" >Connector</td>
2427 <td valign="top" >TBD</td>
2428 </tr>
2429 <tr>
2430 <td valign="top" >"flicker_filter_2d"</td>
2431 <td valign="top" >RANGE</td>
2432 <td valign="top" >Min=0, Max= SDVO dependent</td>
2433 <td valign="top" >Connector</td>
2434 <td valign="top" >TBD</td>
2435 </tr>
2436 <tr>
2437 <td valign="top" >"tv_chroma_filter"</td>
2438 <td valign="top" >RANGE</td>
2439 <td valign="top" >Min=0, Max= SDVO dependent</td>
2440 <td valign="top" >Connector</td>
2441 <td valign="top" >TBD</td>
2442 </tr>
2443 <tr>
2444 <td valign="top" >"tv_luma_filter"</td>
2445 <td valign="top" >RANGE</td>
2446 <td valign="top" >Min=0, Max= SDVO dependent</td>
2447 <td valign="top" >Connector</td>
2448 <td valign="top" >TBD</td>
2449 </tr>
2450 <tr>
2451 <td valign="top" >"dot_crawl"</td>
2452 <td valign="top" >RANGE</td>
2453 <td valign="top" >Min=0, Max=1</td>
2454 <td valign="top" >Connector</td>
2455 <td valign="top" >TBD</td>
2456 </tr>
2457 <tr>
2458 <td valign="top" >SDVO-TV/LVDS</td>
2459 <td valign="top" >"brightness"</td>
2460 <td valign="top" >RANGE</td>
2461 <td valign="top" >Min=0, Max= SDVO dependent</td>
2462 <td valign="top" >Connector</td>
2463 <td valign="top" >TBD</td>
2464 </tr>
2465 <tr>
2466 <td rowspan="11" valign="top" >armada</td>
2467 <td rowspan="2" valign="top" >CRTC</td>
2468 <td valign="top" >"CSC_YUV"</td>
2469 <td valign="top" >ENUM</td>
2470 <td valign="top" >{ "Auto", "CCIR601", "CCIR709" }</td>
2471 <td valign="top" >CRTC</td>
2472 <td valign="top" >TBD</td>
2473 </tr>
2474 <tr>
2475 <td valign="top" >"CSC_RGB"</td>
2476 <td valign="top" >ENUM</td>
2477 <td valign="top" >{ "Auto", "Computer system", "Studio" }</td>
2478 <td valign="top" >CRTC</td>
2479 <td valign="top" >TBD</td>
2480 </tr>
2481 <tr>
2482 <td rowspan="9" valign="top" >Overlay</td>
2483 <td valign="top" >"colorkey"</td>
2484 <td valign="top" >RANGE</td>
2485 <td valign="top" >Min=0, Max=0xffffff</td>
2486 <td valign="top" >Plane</td>
2487 <td valign="top" >TBD</td>
2488 </tr>
2489 <tr>
2490 <td valign="top" >"colorkey_min"</td>
2491 <td valign="top" >RANGE</td>
2492 <td valign="top" >Min=0, Max=0xffffff</td>
2493 <td valign="top" >Plane</td>
2494 <td valign="top" >TBD</td>
2495 </tr>
2496 <tr>
2497 <td valign="top" >"colorkey_max"</td>
2498 <td valign="top" >RANGE</td>
2499 <td valign="top" >Min=0, Max=0xffffff</td>
2500 <td valign="top" >Plane</td>
2501 <td valign="top" >TBD</td>
2502 </tr>
2503 <tr>
2504 <td valign="top" >"colorkey_val"</td>
2505 <td valign="top" >RANGE</td>
2506 <td valign="top" >Min=0, Max=0xffffff</td>
2507 <td valign="top" >Plane</td>
2508 <td valign="top" >TBD</td>
2509 </tr>
2510 <tr>
2511 <td valign="top" >"colorkey_alpha"</td>
2512 <td valign="top" >RANGE</td>
2513 <td valign="top" >Min=0, Max=0xffffff</td>
2514 <td valign="top" >Plane</td>
2515 <td valign="top" >TBD</td>
2516 </tr>
2517 <tr>
2518 <td valign="top" >"colorkey_mode"</td>
2519 <td valign="top" >ENUM</td>
2520 <td valign="top" >{ "disabled", "Y component", "U component"
2521 , "V component", "RGB", “R component", "G component", "B component" }</td>
2522 <td valign="top" >Plane</td>
2523 <td valign="top" >TBD</td>
2524 </tr>
2525 <tr>
2526 <td valign="top" >"brightness"</td>
2527 <td valign="top" >RANGE</td>
2528 <td valign="top" >Min=0, Max=256 + 255</td>
2529 <td valign="top" >Plane</td>
2530 <td valign="top" >TBD</td>
2531 </tr>
2532 <tr>
2533 <td valign="top" >"contrast"</td>
2534 <td valign="top" >RANGE</td>
2535 <td valign="top" >Min=0, Max=0x7fff</td>
2536 <td valign="top" >Plane</td>
2537 <td valign="top" >TBD</td>
2538 </tr>
2539 <tr>
2540 <td valign="top" >"saturation"</td>
2541 <td valign="top" >RANGE</td>
2542 <td valign="top" >Min=0, Max=0x7fff</td>
2543 <td valign="top" >Plane</td>
2544 <td valign="top" >TBD</td>
2545 </tr>
2546 <tr>
2547 <td rowspan="2" valign="top" >exynos</td>
2548 <td valign="top" >CRTC</td>
2549 <td valign="top" >"mode"</td>
2550 <td valign="top" >ENUM</td>
2551 <td valign="top" >{ "normal", "blank" }</td>
2552 <td valign="top" >CRTC</td>
2553 <td valign="top" >TBD</td>
2554 </tr>
2555 <tr>
2556 <td valign="top" >Overlay</td>
2557 <td valign="top" >"zpos"</td>
2558 <td valign="top" >RANGE</td>
2559 <td valign="top" >Min=0, Max=MAX_PLANE-1</td>
2560 <td valign="top" >Plane</td>
2561 <td valign="top" >TBD</td>
2562 </tr>
2563 <tr>
2564 <td rowspan="2" valign="top" >i2c/ch7006_drv</td>
2565 <td valign="top" >Generic</td>
2566 <td valign="top" >"scale"</td>
2567 <td valign="top" >RANGE</td>
2568 <td valign="top" >Min=0, Max=2</td>
2569 <td valign="top" >Connector</td>
2570 <td valign="top" >TBD</td>
2571 </tr>
2572 <tr>
2573 <td rowspan="1" valign="top" >TV</td>
2574 <td valign="top" >"mode"</td>
2575 <td valign="top" >ENUM</td>
2576 <td valign="top" >{ "PAL", "PAL-M", "PAL-N", "PAL-Nc"
2577 , "PAL-60", "NTSC-M", "NTSC-J" }</td>
2578 <td valign="top" >Connector</td>
2579 <td valign="top" >TBD</td>
2580 </tr>
2581 <tr>
2582 <td rowspan="15" valign="top" >nouveau</td>
2583 <td rowspan="6" valign="top" >NV10 Overlay</td>
2584 <td valign="top" >"colorkey"</td>
2585 <td valign="top" >RANGE</td>
2586 <td valign="top" >Min=0, Max=0x01ffffff</td>
2587 <td valign="top" >Plane</td>
2588 <td valign="top" >TBD</td>
2589 </tr>
2590 <tr>
2591 <td valign="top" >"contrast"</td>
2592 <td valign="top" >RANGE</td>
2593 <td valign="top" >Min=0, Max=8192-1</td>
2594 <td valign="top" >Plane</td>
2595 <td valign="top" >TBD</td>
2596 </tr>
2597 <tr>
2598 <td valign="top" >"brightness"</td>
2599 <td valign="top" >RANGE</td>
2600 <td valign="top" >Min=0, Max=1024</td>
2601 <td valign="top" >Plane</td>
2602 <td valign="top" >TBD</td>
2603 </tr>
2604 <tr>
2605 <td valign="top" >"hue"</td>
2606 <td valign="top" >RANGE</td>
2607 <td valign="top" >Min=0, Max=359</td>
2608 <td valign="top" >Plane</td>
2609 <td valign="top" >TBD</td>
2610 </tr>
2611 <tr>
2612 <td valign="top" >"saturation"</td>
2613 <td valign="top" >RANGE</td>
2614 <td valign="top" >Min=0, Max=8192-1</td>
2615 <td valign="top" >Plane</td>
2616 <td valign="top" >TBD</td>
2617 </tr>
2618 <tr>
2619 <td valign="top" >"iturbt_709"</td>
2620 <td valign="top" >RANGE</td>
2621 <td valign="top" >Min=0, Max=1</td>
2622 <td valign="top" >Plane</td>
2623 <td valign="top" >TBD</td>
2624 </tr>
2625 <tr>
2626 <td rowspan="2" valign="top" >Nv04 Overlay</td>
2627 <td valign="top" >"colorkey"</td>
2628 <td valign="top" >RANGE</td>
2629 <td valign="top" >Min=0, Max=0x01ffffff</td>
2630 <td valign="top" >Plane</td>
2631 <td valign="top" >TBD</td>
2632 </tr>
2633 <tr>
2634 <td valign="top" >"brightness"</td>
2635 <td valign="top" >RANGE</td>
2636 <td valign="top" >Min=0, Max=1024</td>
2637 <td valign="top" >Plane</td>
2638 <td valign="top" >TBD</td>
2639 </tr>
2640 <tr>
2641 <td rowspan="7" valign="top" >Display</td>
2642 <td valign="top" >"dithering mode"</td>
2643 <td valign="top" >ENUM</td>
2644 <td valign="top" >{ "auto", "off", "on" }</td>
2645 <td valign="top" >Connector</td>
2646 <td valign="top" >TBD</td>
2647 </tr>
2648 <tr>
2649 <td valign="top" >"dithering depth"</td>
2650 <td valign="top" >ENUM</td>
2651 <td valign="top" >{ "auto", "off", "on", "static 2x2", "dynamic 2x2", "temporal" }</td>
2652 <td valign="top" >Connector</td>
2653 <td valign="top" >TBD</td>
2654 </tr>
2655 <tr>
2656 <td valign="top" >"underscan"</td>
2657 <td valign="top" >ENUM</td>
2658 <td valign="top" >{ "auto", "6 bpc", "8 bpc" }</td>
2659 <td valign="top" >Connector</td>
2660 <td valign="top" >TBD</td>
2661 </tr>
2662 <tr>
2663 <td valign="top" >"underscan hborder"</td>
2664 <td valign="top" >RANGE</td>
2665 <td valign="top" >Min=0, Max=128</td>
2666 <td valign="top" >Connector</td>
2667 <td valign="top" >TBD</td>
2668 </tr>
2669 <tr>
2670 <td valign="top" >"underscan vborder"</td>
2671 <td valign="top" >RANGE</td>
2672 <td valign="top" >Min=0, Max=128</td>
2673 <td valign="top" >Connector</td>
2674 <td valign="top" >TBD</td>
2675 </tr>
2676 <tr>
2677 <td valign="top" >"vibrant hue"</td>
2678 <td valign="top" >RANGE</td>
2679 <td valign="top" >Min=0, Max=180</td>
2680 <td valign="top" >Connector</td>
2681 <td valign="top" >TBD</td>
2682 </tr>
2683 <tr>
2684 <td valign="top" >"color vibrance"</td>
2685 <td valign="top" >RANGE</td>
2686 <td valign="top" >Min=0, Max=200</td>
2687 <td valign="top" >Connector</td>
2688 <td valign="top" >TBD</td>
2689 </tr>
2690 <tr>
2691 <td valign="top" >omap</td>
2692 <td valign="top" >Generic</td>
2693 <td valign="top" >"zorder"</td>
2694 <td valign="top" >RANGE</td>
2695 <td valign="top" >Min=0, Max=3</td>
2696 <td valign="top" >CRTC, Plane</td>
2697 <td valign="top" >TBD</td>
2698 </tr>
2699 <tr>
2700 <td valign="top" >qxl</td>
2701 <td valign="top" >Generic</td>
2702 <td valign="top" >"hotplug_mode_update"</td>
2703 <td valign="top" >RANGE</td>
2704 <td valign="top" >Min=0, Max=1</td>
2705 <td valign="top" >Connector</td>
2706 <td valign="top" >TBD</td>
2707 </tr>
2708 <tr>
2709 <td rowspan="9" valign="top" >radeon</td>
2710 <td valign="top" >DVI-I</td>
2711 <td valign="top" >"coherent"</td>
2712 <td valign="top" >RANGE</td>
2713 <td valign="top" >Min=0, Max=1</td>
2714 <td valign="top" >Connector</td>
2715 <td valign="top" >TBD</td>
2716 </tr>
2717 <tr>
2718 <td valign="top" >DAC enable load detect</td>
2719 <td valign="top" >"load detection"</td>
2720 <td valign="top" >RANGE</td>
2721 <td valign="top" >Min=0, Max=1</td>
2722 <td valign="top" >Connector</td>
2723 <td valign="top" >TBD</td>
2724 </tr>
2725 <tr>
2726 <td valign="top" >TV Standard</td>
2727 <td valign="top" >"tv standard"</td>
2728 <td valign="top" >ENUM</td>
2729 <td valign="top" >{ "ntsc", "pal", "pal-m", "pal-60", "ntsc-j"
2730 , "scart-pal", "pal-cn", "secam" }</td>
2731 <td valign="top" >Connector</td>
2732 <td valign="top" >TBD</td>
2733 </tr>
2734 <tr>
2735 <td valign="top" >legacy TMDS PLL detect</td>
2736 <td valign="top" >"tmds_pll"</td>
2737 <td valign="top" >ENUM</td>
2738 <td valign="top" >{ "driver", "bios" }</td>
2739 <td valign="top" >-</td>
2740 <td valign="top" >TBD</td>
2741 </tr>
2742 <tr>
2743 <td rowspan="3" valign="top" >Underscan</td>
2744 <td valign="top" >"underscan"</td>
2745 <td valign="top" >ENUM</td>
2746 <td valign="top" >{ "off", "on", "auto" }</td>
2747 <td valign="top" >Connector</td>
2748 <td valign="top" >TBD</td>
2749 </tr>
2750 <tr>
2751 <td valign="top" >"underscan hborder"</td>
2752 <td valign="top" >RANGE</td>
2753 <td valign="top" >Min=0, Max=128</td>
2754 <td valign="top" >Connector</td>
2755 <td valign="top" >TBD</td>
2756 </tr>
2757 <tr>
2758 <td valign="top" >"underscan vborder"</td>
2759 <td valign="top" >RANGE</td>
2760 <td valign="top" >Min=0, Max=128</td>
2761 <td valign="top" >Connector</td>
2762 <td valign="top" >TBD</td>
2763 </tr>
2764 <tr>
2765 <td valign="top" >Audio</td>
2766 <td valign="top" >"audio"</td>
2767 <td valign="top" >ENUM</td>
2768 <td valign="top" >{ "off", "on", "auto" }</td>
2769 <td valign="top" >Connector</td>
2770 <td valign="top" >TBD</td>
2771 </tr>
2772 <tr>
2773 <td valign="top" >FMT Dithering</td>
2774 <td valign="top" >"dither"</td>
2775 <td valign="top" >ENUM</td>
2776 <td valign="top" >{ "off", "on" }</td>
2777 <td valign="top" >Connector</td>
2778 <td valign="top" >TBD</td>
2779 </tr>
2780 <tr>
2781 <td rowspan="3" valign="top" >rcar-du</td>
2782 <td rowspan="3" valign="top" >Generic</td>
2783 <td valign="top" >"alpha"</td>
2784 <td valign="top" >RANGE</td>
2785 <td valign="top" >Min=0, Max=255</td>
2786 <td valign="top" >Plane</td>
2787 <td valign="top" >TBD</td>
2788 </tr>
2789 <tr>
2790 <td valign="top" >"colorkey"</td>
2791 <td valign="top" >RANGE</td>
2792 <td valign="top" >Min=0, Max=0x01ffffff</td>
2793 <td valign="top" >Plane</td>
2794 <td valign="top" >TBD</td>
2795 </tr>
2796 <tr>
2797 <td valign="top" >"zpos"</td>
2798 <td valign="top" >RANGE</td>
2799 <td valign="top" >Min=1, Max=7</td>
2800 <td valign="top" >Plane</td>
2801 <td valign="top" >TBD</td>
2802 </tr>
2803 </tbody>
2804 </table>
2805 </sect2>
2806 </sect1>
2807
2808 <!-- Internals: vertical blanking -->
2809
2810 <sect1 id="drm-vertical-blank">
2811 <title>Vertical Blanking</title>
2812 <para>
2813 Vertical blanking plays a major role in graphics rendering. To achieve
2814 tear-free display, users must synchronize page flips and/or rendering to
2815 vertical blanking. The DRM API offers ioctls to perform page flips
2816 synchronized to vertical blanking and wait for vertical blanking.
2817 </para>
2818 <para>
2819 The DRM core handles most of the vertical blanking management logic, which
2820 involves filtering out spurious interrupts, keeping race-free blanking
2821 counters, coping with counter wrap-around and resets and keeping use
2822 counts. It relies on the driver to generate vertical blanking interrupts
2823 and optionally provide a hardware vertical blanking counter. Drivers must
2824 implement the following operations.
2825 </para>
2826 <itemizedlist>
2827 <listitem>
2828 <synopsis>int (*enable_vblank) (struct drm_device *dev, int crtc);
2829void (*disable_vblank) (struct drm_device *dev, int crtc);</synopsis>
2830 <para>
2831 Enable or disable vertical blanking interrupts for the given CRTC.
2832 </para>
2833 </listitem>
2834 <listitem>
2835 <synopsis>u32 (*get_vblank_counter) (struct drm_device *dev, int crtc);</synopsis>
2836 <para>
2837 Retrieve the value of the vertical blanking counter for the given
2838 CRTC. If the hardware maintains a vertical blanking counter its value
2839 should be returned. Otherwise drivers can use the
2840 <function>drm_vblank_count</function> helper function to handle this
2841 operation.
2842 </para>
2843 </listitem>
2844 </itemizedlist>
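    <para>
      A minimal sketch of the operations listed above, assuming a
      hypothetical foo driver with an interrupt-enable register and a
      free-running hardware frame counter (all register names and helpers
      are made up for illustration):
    </para>
    <programlisting>
/* Hypothetical hardware: FOO_IER is an interrupt-enable register and
 * FOO_FRAME_COUNT(crtc) a free-running per-CRTC frame counter. */
static int foo_enable_vblank(struct drm_device *dev, int crtc)
{
	struct foo_device *foo = dev->dev_private;

	/* Unmask the vertical blanking interrupt for this CRTC. */
	foo_write(foo, FOO_IER, foo_read(foo, FOO_IER) | FOO_IER_VBLANK(crtc));
	return 0;
}

static void foo_disable_vblank(struct drm_device *dev, int crtc)
{
	struct foo_device *foo = dev->dev_private;

	foo_write(foo, FOO_IER, foo_read(foo, FOO_IER) &amp; ~FOO_IER_VBLANK(crtc));
}

static u32 foo_get_vblank_counter(struct drm_device *dev, int crtc)
{
	struct foo_device *foo = dev->dev_private;

	/* Return the hardware frame counter; hardware without one can fall
	 * back to the drm_vblank_count() helper instead. */
	return foo_read(foo, FOO_FRAME_COUNT(crtc));
}
    </programlisting>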
2845 <para>
2846 Drivers must initialize the vertical blanking handling core with a call to
2847 <function>drm_vblank_init</function> in their
2848 <methodname>load</methodname> operation. The function will set the struct
2849 <structname>drm_device</structname>
2850 <structfield>vblank_disable_allowed</structfield> field to 0. This will
2851 keep vertical blanking interrupts enabled permanently until the first mode
2852 set operation, where <structfield>vblank_disable_allowed</structfield> is
2853 set to 1. The reason behind this is not clear. Drivers can set the field
2854 to 1 after calling <function>drm_vblank_init</function> to make vertical
2855 blanking interrupts dynamically managed from the beginning.
2856 </para>
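    <para>
      For example, a KMS driver's <methodname>load</methodname> operation
      might initialize vblank handling as in the following sketch (error
      unwinding elided):
    </para>
    <programlisting>
static int foo_load(struct drm_device *dev, unsigned long flags)
{
	int ret;

	/* ... create the CRTCs first, then one vblank counter per CRTC ... */
	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret)
		return ret;

	/* Optional: allow dynamic vblank interrupt management right away. */
	dev->vblank_disable_allowed = 1;

	return 0;
}
    </programlisting>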
2857 <para>
2858 Vertical blanking interrupts can be enabled by the DRM core or by drivers
2859 themselves (for instance to handle page flipping operations). The DRM core
2860 maintains a vertical blanking use count to ensure that the interrupts are
2861 not disabled while a user still needs them. To increment the use count,
2862 drivers call <function>drm_vblank_get</function>. Upon return vertical
2863 blanking interrupts are guaranteed to be enabled.
2864 </para>
2865 <para>
2866 To decrement the use count drivers call
2867 <function>drm_vblank_put</function>. Only when the use count drops to zero
2868 will the DRM core disable the vertical blanking interrupts after a delay
2869 by scheduling a timer. The delay is accessible through the vblankoffdelay
2870 module parameter or the <varname>drm_vblank_offdelay</varname> global
2871 variable and expressed in milliseconds. Its default value is 5000 ms.
2872 Zero means never disable, and a negative value means disable immediately.
2873 Drivers may override the behaviour by setting the
2874 <structname>drm_device</structname>
2875 <structfield>vblank_disable_immediate</structfield> flag, which when set
2876 causes vblank interrupts to be disabled immediately regardless of the
2877 drm_vblank_offdelay value. The flag should only be set if there's a
2878 properly working hardware vblank counter present.
2879 </para>
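    <para>
      A typical pattern, sketched below, takes a vblank reference while an
      operation such as a page flip is outstanding and releases it from the
      completion path (<function>queue_flip</function> is a hypothetical
      driver-specific helper):
    </para>
    <programlisting>
/* Keep vblank interrupts enabled while the flip is pending. */
ret = drm_vblank_get(dev, crtc);
if (ret)
	return ret;

queue_flip(dev, crtc, fb);

/* ... later, from the flip-completion path ... */
drm_vblank_put(dev, crtc);
    </programlisting>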
2880 <para>
2881 When a vertical blanking interrupt occurs drivers only need to call the
2882 <function>drm_handle_vblank</function> function to account for the
2883 interrupt.
2884 </para>
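    <para>
      In the interrupt handler this typically reduces to the following
      sketch (the status register and bit names are hypothetical):
    </para>
    <programlisting>
static irqreturn_t foo_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	u32 status = foo_read(dev->dev_private, FOO_ISR);

	if (status &amp; FOO_ISR_VBLANK(0))
		drm_handle_vblank(dev, 0);	/* CRTC index 0 */

	return IRQ_HANDLED;
}
    </programlisting>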
2885 <para>
2886 Resources allocated by <function>drm_vblank_init</function> must be freed
2887 with a call to <function>drm_vblank_cleanup</function> in the driver
2888 <methodname>unload</methodname> operation handler.
2889 </para>
2890 <sect2>
2891 <title>Vertical Blanking and Interrupt Handling Functions Reference</title>
2892!Edrivers/gpu/drm/drm_irq.c
2893!Finclude/drm/drmP.h drm_crtc_vblank_waitqueue
2894 </sect2>
2895 </sect1>
2896
2897 <!-- Internals: open/close, file operations and ioctls -->
2898
2899 <sect1>
2900 <title>Open/Close, File Operations and IOCTLs</title>
2901 <sect2>
2902 <title>Open and Close</title>
2903 <synopsis>int (*firstopen) (struct drm_device *);
2904void (*lastclose) (struct drm_device *);
2905int (*open) (struct drm_device *, struct drm_file *);
2906void (*preclose) (struct drm_device *, struct drm_file *);
2907void (*postclose) (struct drm_device *, struct drm_file *);</synopsis>
2908 <abstract>Open and close handlers. None of those methods are mandatory.
2909 </abstract>
2910 <para>
2911 The <methodname>firstopen</methodname> method is called by the DRM core
2912 for legacy UMS (User Mode Setting) drivers only when an application
2913 opens a device that has no other opened file handle. UMS drivers can
2914 implement it to acquire device resources. KMS drivers can't use the
2915 method and must acquire resources in the <methodname>load</methodname>
2916 method instead.
2917 </para>
2918 <para>
2919 Similarly the <methodname>lastclose</methodname> method is called when
2920 the last application holding a file handle opened on the device closes
2921 it, for both UMS and KMS drivers. Additionally, the method is also
2922 called at module unload time or, for hot-pluggable devices, when the
2923 device is unplugged. The <methodname>firstopen</methodname> and
2924 <methodname>lastclose</methodname> calls can thus be unbalanced.
2925 </para>
2926 <para>
2927 The <methodname>open</methodname> method is called every time the device
2928 is opened by an application. Drivers can allocate per-file private data
2929 in this method and store it in the struct
2930 <structname>drm_file</structname> <structfield>driver_priv</structfield>
2931 field. Note that the <methodname>open</methodname> method is called
2932 before <methodname>firstopen</methodname>.
2933 </para>
2934 <para>
2935 The close operation is split into <methodname>preclose</methodname> and
2936 <methodname>postclose</methodname> methods. Drivers must stop and
2937 clean up all per-file operations in the <methodname>preclose</methodname>
2938 method. For instance pending vertical blanking and page flip events must
2939 be cancelled. No per-file operation is allowed on the file handle after
2940 returning from the <methodname>preclose</methodname> method.
2941 </para>
2942 <para>
2943 Finally the <methodname>postclose</methodname> method is called as the
2944 last step of the close operation, right before calling the
2945 <methodname>lastclose</methodname> method if no other open file handle
2946 exists for the device. Drivers that have allocated per-file private data
2947 in the <methodname>open</methodname> method should free it here.
2948 </para>
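    <para>
      The following sketch shows this common pattern, allocating per-file
      state in <methodname>open</methodname> and freeing it in
      <methodname>postclose</methodname> (the <structname>foo_file</structname>
      structure is hypothetical):
    </para>
    <programlisting>
static int foo_open(struct drm_device *dev, struct drm_file *file)
{
	struct foo_file *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return -ENOMEM;

	file->driver_priv = priv;
	return 0;
}

static void foo_postclose(struct drm_device *dev, struct drm_file *file)
{
	kfree(file->driver_priv);
}
    </programlisting>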
2949 <para>
2950 The <methodname>lastclose</methodname> method should restore CRTC and
2951 plane properties to their default values, so that a subsequent open of the
2952 device will not inherit state from the previous user. It can also be
2953 used to execute delayed power switching state changes, e.g. in
2954 conjunction with the vga_switcheroo infrastructure (see
2955 <xref linkend="vga_switcheroo"/>). Beyond that KMS drivers should not
2956 do any further cleanup. Only legacy UMS drivers might need to clean up
2957 device state so that the vga console or an independent fbdev driver
2958 could take over.
2959 </para>
2960 </sect2>
2961 <sect2>
2962 <title>File Operations</title>
2963!Pdrivers/gpu/drm/drm_fops.c file operations
2964!Edrivers/gpu/drm/drm_fops.c
2965 </sect2>
2966 <sect2>
2967 <title>IOCTLs</title>
2968 <synopsis>struct drm_ioctl_desc *ioctls;
2969int num_ioctls;</synopsis>
2970 <abstract>Driver-specific ioctl descriptor table.</abstract>
2971 <para>
2972 Driver-specific ioctl numbers start at DRM_COMMAND_BASE. The ioctl
2973 descriptor table is indexed by the ioctl number offset from the base
2974 value. Drivers can use the DRM_IOCTL_DEF_DRV() macro to initialize the
2975 table entries.
2976 </para>
2977 <para>
2978 <programlisting>DRM_IOCTL_DEF_DRV(ioctl, func, flags)</programlisting>
2979 <para>
2980 <parameter>ioctl</parameter> is the ioctl name. Drivers must define
2981 the DRM_##ioctl and DRM_IOCTL_##ioctl macros to the ioctl number
2982 offset from DRM_COMMAND_BASE and the ioctl number respectively. The
2983 first macro is private to the device while the second must be exposed
2984 to userspace in a public header.
2985 </para>
2986 <para>
2987 <parameter>func</parameter> is a pointer to the ioctl handler function
2988 compatible with the <type>drm_ioctl_t</type> type.
2989 <programlisting>typedef int drm_ioctl_t(struct drm_device *dev, void *data,
2990 struct drm_file *file_priv);</programlisting>
2991 </para>
2992 <para>
2993 <parameter>flags</parameter> is a bitmask combination of the following
2994 values. It restricts how the ioctl is allowed to be called.
2995 <itemizedlist>
2996 <listitem><para>
2997 DRM_AUTH - Only authenticated callers allowed
2998 </para></listitem>
2999 <listitem><para>
3000 DRM_MASTER - The ioctl can only be called on the master file
3001 handle
3002 </para></listitem>
3003 <listitem><para>
3004 DRM_ROOT_ONLY - Only callers with the SYSADMIN capability allowed
3005 </para></listitem>
3006 <listitem><para>
3007 DRM_CONTROL_ALLOW - The ioctl can only be called on a control
3008 device
3009 </para></listitem>
3010 <listitem><para>
3011 DRM_UNLOCKED - The ioctl handler will be called without locking
3012 the DRM global mutex. This is the enforced default for KMS drivers
3013 (i.e. using the DRIVER_MODESET flag) and hence shouldn't be used
3014 any more for new drivers.
3015 </para></listitem>
3016 </itemizedlist>
3017 </para>
3018 </para>
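    <para>
      Put together, a driver's ioctl table might look like the following
      sketch. The ioctl names and handlers are hypothetical; the
      DRM_FOO_* and DRM_IOCTL_FOO_* macros are assumed to be defined as
      described above.
    </para>
    <programlisting>
static const struct drm_ioctl_desc foo_ioctls[] = {
	DRM_IOCTL_DEF_DRV(FOO_CREATE_BO, foo_create_bo_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(FOO_SET_TILING, foo_set_tiling_ioctl,
			  DRM_AUTH | DRM_MASTER),
};

static struct drm_driver foo_driver = {
	/* ... */
	.ioctls = foo_ioctls,
	.num_ioctls = ARRAY_SIZE(foo_ioctls),
};
    </programlisting>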
3019!Edrivers/gpu/drm/drm_ioctl.c
3020 </sect2>
3021 </sect1>
3022 <sect1>
3023 <title>Legacy Support Code</title>
3024 <para>
3025 This section very briefly covers some of the old legacy support code which
3026 is only used by old DRM drivers which have done a so-called shadow-attach
3027 to the underlying device instead of registering as a real driver. This
3028 also includes some of the old generic buffer management and command
3029 submission code. Do not use any of this in new and modern drivers.
3030 </para>
3031
3032 <sect2>
3033 <title>Legacy Suspend/Resume</title>
3034 <para>
3035 The DRM core provides some suspend/resume code, but drivers wanting full
3036 suspend/resume support should provide save() and restore() functions.
3037 These are called at suspend, hibernate, or resume time, and should perform
3038 any state save or restore required by your device across suspend or
3039 hibernate states.
3040 </para>
3041 <synopsis>int (*suspend) (struct drm_device *, pm_message_t state);
3042 int (*resume) (struct drm_device *);</synopsis>
3043 <para>
3044 Those are legacy suspend and resume methods which
3045 <emphasis>only</emphasis> work with the legacy shadow-attach driver
3046 registration functions. New drivers should use the power management
3047 interface provided by their bus type (usually through
3048 the struct <structname>device_driver</structname> dev_pm_ops) and set
3049 these methods to NULL.
3050 </para>
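    <para>
      For reference, the modern replacement sketched below hooks into the
      bus power management instead (a PCI example with hypothetical
      callbacks):
    </para>
    <programlisting>
static int foo_pm_suspend(struct device *dev)
{
	/* save device state */
	return 0;
}

static int foo_pm_resume(struct device *dev)
{
	/* restore device state */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_pm_suspend, foo_pm_resume)
};

static struct pci_driver foo_pci_driver = {
	/* ... */
	.driver.pm = &amp;foo_pm_ops,
};
    </programlisting>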
3051 </sect2>
3052
3053 <sect2>
3054 <title>Legacy DMA Services</title>
3055 <para>
3056 This should cover how DMA mapping etc. is supported by the core.
3057 These functions are deprecated and should not be used.
3058 </para>
3059 </sect2>
3060 </sect1>
3061 </chapter>
3062
3063<!-- TODO
3064
3065- Add a glossary
3066- Document the struct_mutex catch-all lock
3067- Document connector properties
3068
3069- Why is the load method optional?
3070- What are drivers supposed to set the initial display state to, and how?
3071 Connector's DPMS states are not initialized and are thus equal to
3072 DRM_MODE_DPMS_ON. The fbcon compatibility layer calls
3073 drm_helper_disable_unused_functions(), which disables unused encoders and
3074 CRTCs, but doesn't touch the connectors' DPMS state, and
3075 drm_helper_connector_dpms() in reaction to fbdev blanking events. Do drivers
3076 that don't implement (or just don't use) fbcon compatibility need to call
3077 those functions themselves?
3078- KMS drivers must call drm_vblank_pre_modeset() and drm_vblank_post_modeset()
3079 around mode setting. Should this be done in the DRM core?
3080- vblank_disable_allowed is set to 1 in the first drm_vblank_post_modeset()
3081 call and never set back to 0. It seems to be safe to permanently set it to 1
3082 in drm_vblank_init() for KMS driver, and it might be safe for UMS drivers as
3083 well. This should be investigated.
3084- crtc and connector .save and .restore operations are only used internally in
3085 drivers, should they be removed from the core?
3086- encoder mid-layer .save and .restore operations are only used internally in
3087 drivers, should they be removed from the core?
3088- encoder mid-layer .detect operation is only used internally in drivers,
3089 should it be removed from the core?
3090-->
3091
3092 <!-- External interfaces -->
3093
3094 <chapter id="drmExternals">
3095 <title>Userland interfaces</title>
3096 <para>
3097 The DRM core exports several interfaces to applications,
3098 generally intended to be used through corresponding libdrm
3099 wrapper functions. In addition, drivers export device-specific
3100 interfaces for use by userspace drivers &amp; device-aware
3101 applications through ioctls and sysfs files.
3102 </para>
3103 <para>
3104 External interfaces include: memory mapping, context management,
3105 DMA operations, AGP management, vblank control, fence
3106 management, memory management, and output management.
3107 </para>
3108 <para>
3109 Cover generic ioctls and sysfs layout here. We only need high-level
3110 info, since man pages should cover the rest.
3111 </para>
3112
3113 <!-- External: render nodes -->
3114
3115 <sect1>
3116 <title>Render nodes</title>
3117 <para>
3118 DRM core provides multiple character-devices for user-space to use.
3119 Depending on which device is opened, user-space can perform a different
3120 set of operations (mainly ioctls). The primary node is always created
3121 and called card&lt;num&gt;. Additionally, a currently
3122 unused control node, called controlD&lt;num&gt;, is also
3123 created. The primary node provides all legacy operations and
3124 historically was the only interface used by userspace. With KMS, the
3125 control node was introduced. However, the planned KMS control interface
3126 has never been written and so the control node stays unused to date.
3127 </para>
3128 <para>
3129 With the increased use of offscreen renderers and GPGPU applications,
3130 clients no longer require running compositors or graphics servers to
3131 make use of a GPU. But the DRM API required unprivileged clients to
3132 authenticate to a DRM-Master prior to getting GPU access. To avoid this
3133 step and to grant clients GPU access without authenticating, render
3134 nodes were introduced. Render nodes solely serve render clients, that
3135 is, no modesetting or privileged ioctls can be issued on render nodes.
3136 Only non-global rendering commands are allowed. If a driver supports
3137 render nodes, it must advertise it via the DRIVER_RENDER
3138 DRM driver capability. If not supported, the primary node must be used
3139 for render clients together with the legacy drmAuth authentication
3140 procedure.
3141 </para>
3142 <para>
3143 If a driver advertises render node support, DRM core will create a
3144 separate render node called renderD&lt;num&gt;. There will
3145 be one render node per device. No ioctls except PRIME-related ioctls
3146 will be allowed on this node. Especially GEM_OPEN will be
3147 explicitly prohibited. Render nodes are designed to avoid the
3148 buffer leaks that occur if clients guess the flink names or mmap
3149 offsets on the legacy interface. In addition to this basic interface,
3150 drivers must mark their driver-dependent render-only ioctls as
3151 DRM_RENDER_ALLOW so render clients can use them. Driver
3152 authors must be careful not to allow any privileged ioctls on render
3153 nodes.
3154 </para>
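    <para>
      Marking a render-safe ioctl happens in the driver's ioctl table, as
      in this sketch with a hypothetical submit ioctl:
    </para>
    <programlisting>
/* Safe on render nodes: pure rendering, no modeset or global state. */
DRM_IOCTL_DEF_DRV(FOO_SUBMIT, foo_submit_ioctl,
		  DRM_AUTH | DRM_RENDER_ALLOW),
    </programlisting>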
3155 <para>
3156 With render nodes, user-space can now control access to the render node
3157 via basic file-system access-modes. A running graphics server which
3158 authenticates clients on the privileged primary/legacy node is no longer
3159 required. Instead, a client can open the render node and is immediately
3160 granted GPU access. Communication between clients (or servers) is done
3161 via PRIME. FLINK from render node to legacy node is not supported. New
3162 clients must not use the insecure FLINK interface.
3163 </para>
3164 <para>
3165 Besides dropping all modeset/global ioctls, render nodes also drop the
3166 DRM-Master concept. There is no reason to associate render clients with
3167 a DRM-Master as they are independent of any graphics server. In any
3168 case, they must work without a running master.
3169 Drivers must be able to run without a master object if they support
3170 render nodes. If, on the other hand, a driver requires shared state
3171 between clients which is visible to user-space and accessible beyond
3172 open-file boundaries, they cannot support render nodes.
3173 </para>
3174 </sect1>
3175
3176 <!-- External: vblank handling -->
3177
3178 <sect1>
3179 <title>VBlank event handling</title>
3180 <para>
3181 The DRM core exposes two vertical blank related ioctls:
3182 <variablelist>
3183 <varlistentry>
3184 <term>DRM_IOCTL_WAIT_VBLANK</term>
3185 <listitem>
3186 <para>
3187 This takes a struct drm_wait_vblank structure as its argument,
3188 and it is used to block or request a signal when a specified
3189 vblank event occurs.
3190 </para>
3191 </listitem>
3192 </varlistentry>
3193 <varlistentry>
3194 <term>DRM_IOCTL_MODESET_CTL</term>
3195 <listitem>
3196 <para>
3197 This was only used for user-mode-setting drivers around
3198 modesetting changes to allow the kernel to update the vblank
3199 interrupt after mode setting, since on many devices the vertical
3200 blank counter is reset to 0 at some point during modeset. Modern
3201 drivers should not call this any more since with kernel mode
3202 setting it is a no-op.
3203 </para>
3204 </listitem>
3205 </varlistentry>
3206 </variablelist>
3207 </para>
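    <para>
      A minimal user-space sketch of DRM_IOCTL_WAIT_VBLANK, blocking until
      the next vertical blank on the first CRTC (<parameter>fd</parameter>
      is a file descriptor obtained by opening a DRM device node such as
      /dev/dri/card0):
    </para>
    <programlisting>
#include &lt;string.h&gt;
#include &lt;sys/ioctl.h&gt;
#include &lt;drm/drm.h&gt;

int wait_next_vblank(int fd)
{
	union drm_wait_vblank vbl;

	memset(&amp;vbl, 0, sizeof(vbl));
	vbl.request.type = _DRM_VBLANK_RELATIVE;
	vbl.request.sequence = 1;	/* the next vblank after now */

	return ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &amp;vbl);
}
    </programlisting>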
3208 </sect1>
3209
3210 </chapter>
3211</part>
3212<part id="drmDrivers">
3213 <title>DRM Drivers</title>
3214
3215 <partintro>
3216 <para>
3217 This second part of the GPU Driver Developer's Guide documents driver
3218 code, implementation details and also all the driver-specific userspace
3219 interfaces. Especially since all hardware-acceleration interfaces to
3220 userspace are driver specific for efficiency and other reasons, these
3221 interfaces can be rather substantial. Hence every driver has its own
3222 chapter.
3223 </para>
3224 </partintro>
3225
3226 <chapter id="drmI915">
3227 <title>drm/i915 Intel GFX Driver</title>
3228 <para>
3229 The drm/i915 driver supports all (with the exception of some very early
3230 models) integrated GFX chipsets with both Intel display and rendering
3231 blocks. This excludes a set of SoC platforms with an SGX rendering unit,
3232 those have basic support through the gma500 drm driver.
3233 </para>
3234 <sect1>
3235 <title>Core Driver Infrastructure</title>
3236 <para>
3237 This section covers core driver infrastructure used by both the display
3238 and the GEM parts of the driver.
3239 </para>
3240 <sect2>
3241 <title>Runtime Power Management</title>
3242!Pdrivers/gpu/drm/i915/intel_runtime_pm.c runtime pm
3243!Idrivers/gpu/drm/i915/intel_runtime_pm.c
3244!Idrivers/gpu/drm/i915/intel_uncore.c
3245 </sect2>
3246 <sect2>
3247 <title>Interrupt Handling</title>
3248!Pdrivers/gpu/drm/i915/i915_irq.c interrupt handling
3249!Fdrivers/gpu/drm/i915/i915_irq.c intel_irq_init intel_irq_init_hw intel_hpd_init
3250!Fdrivers/gpu/drm/i915/i915_irq.c intel_runtime_pm_disable_interrupts
3251!Fdrivers/gpu/drm/i915/i915_irq.c intel_runtime_pm_enable_interrupts
3252 </sect2>
3253 <sect2>
3254 <title>Intel GVT-g Guest Support (vGPU)</title>
3255!Pdrivers/gpu/drm/i915/i915_vgpu.c Intel GVT-g guest support
3256!Idrivers/gpu/drm/i915/i915_vgpu.c
3257 </sect2>
3258 </sect1>
3259 <sect1>
3260 <title>Display Hardware Handling</title>
3261 <para>
3262 This section covers everything related to the display hardware including
3263 the mode setting infrastructure, plane, sprite and cursor handling and
3264 display, output probing and related topics.
3265 </para>
3266 <sect2>
3267 <title>Mode Setting Infrastructure</title>
3268 <para>
3269 The i915 driver is thus far the only DRM driver which doesn't use the
3270 common DRM helper code to implement mode setting sequences. Thus it
3271 has its own tailor-made infrastructure for executing a display
3272 configuration change.
3273 </para>
3274 </sect2>
3275 <sect2>
3276 <title>Frontbuffer Tracking</title>
3277!Pdrivers/gpu/drm/i915/intel_frontbuffer.c frontbuffer tracking
3278!Idrivers/gpu/drm/i915/intel_frontbuffer.c
3279!Fdrivers/gpu/drm/i915/i915_gem.c i915_gem_track_fb
3280 </sect2>
3281 <sect2>
3282 <title>Display FIFO Underrun Reporting</title>
3283!Pdrivers/gpu/drm/i915/intel_fifo_underrun.c fifo underrun handling
3284!Idrivers/gpu/drm/i915/intel_fifo_underrun.c
3285 </sect2>
3286 <sect2>
3287 <title>Plane Configuration</title>
3288 <para>
3289 This section covers plane configuration and composition with the
3290 primary plane, sprites, cursors and overlays. This includes the
3291 infrastructure to do atomic vsync'ed updates of all this state and
3292 also tightly coupled topics like watermark setup and computation,
3293 framebuffer compression and panel self refresh.
3294 </para>
3295 </sect2>
3296 <sect2>
3297 <title>Atomic Plane Helpers</title>
3298!Pdrivers/gpu/drm/i915/intel_atomic_plane.c atomic plane helpers
3299!Idrivers/gpu/drm/i915/intel_atomic_plane.c
3300 </sect2>
3301 <sect2>
3302 <title>Output Probing</title>
3303 <para>
3304 This section covers output probing and related infrastructure like the
3305 hotplug interrupt storm detection and mitigation code. Note that the
3306 i915 driver still uses most of the common DRM helper code for output
3307 probing, so those sections fully apply.
3308 </para>
3309 </sect2>
3310 <sect2>
3311 <title>Hotplug</title>
3312!Pdrivers/gpu/drm/i915/intel_hotplug.c Hotplug
3313!Idrivers/gpu/drm/i915/intel_hotplug.c
3314 </sect2>
3315 <sect2>
3316 <title>High Definition Audio</title>
3317!Pdrivers/gpu/drm/i915/intel_audio.c High Definition Audio over HDMI and Display Port
3318!Idrivers/gpu/drm/i915/intel_audio.c
3319!Iinclude/drm/i915_component.h
3320 </sect2>
3321 <sect2>
3322 <title>Panel Self Refresh PSR (PSR/SRD)</title>
3323!Pdrivers/gpu/drm/i915/intel_psr.c Panel Self Refresh (PSR/SRD)
3324!Idrivers/gpu/drm/i915/intel_psr.c
3325 </sect2>
3326 <sect2>
3327 <title>Frame Buffer Compression (FBC)</title>
3328!Pdrivers/gpu/drm/i915/intel_fbc.c Frame Buffer Compression (FBC)
3329!Idrivers/gpu/drm/i915/intel_fbc.c
3330 </sect2>
3331 <sect2>
3332 <title>Display Refresh Rate Switching (DRRS)</title>
3333!Pdrivers/gpu/drm/i915/intel_dp.c Display Refresh Rate Switching (DRRS)
3334!Fdrivers/gpu/drm/i915/intel_dp.c intel_dp_set_drrs_state
3335!Fdrivers/gpu/drm/i915/intel_dp.c intel_edp_drrs_enable
3336!Fdrivers/gpu/drm/i915/intel_dp.c intel_edp_drrs_disable
3337!Fdrivers/gpu/drm/i915/intel_dp.c intel_edp_drrs_invalidate
3338!Fdrivers/gpu/drm/i915/intel_dp.c intel_edp_drrs_flush
3339!Fdrivers/gpu/drm/i915/intel_dp.c intel_dp_drrs_init
3340
3341 </sect2>
3342 <sect2>
3343 <title>DPIO</title>
3344!Pdrivers/gpu/drm/i915/i915_reg.h DPIO
3345 </sect2>
3346
3347 <sect2>
3348 <title>CSR firmware support for DMC</title>
3349!Pdrivers/gpu/drm/i915/intel_csr.c csr support for dmc
3350!Idrivers/gpu/drm/i915/intel_csr.c
3351 </sect2>
3352 <sect2>
3353 <title>Video BIOS Table (VBT)</title>
3354!Pdrivers/gpu/drm/i915/intel_bios.c Video BIOS Table (VBT)
3355!Idrivers/gpu/drm/i915/intel_bios.c
3356!Idrivers/gpu/drm/i915/intel_vbt_defs.h
3357 </sect2>
3358 </sect1>
3359
3360 <sect1>
3361 <title>Memory Management and Command Submission</title>
3362 <para>
3363 This section covers all things related to the GEM implementation in the
3364 i915 driver.
3365 </para>
3366 <sect2>
3367 <title>Batchbuffer Parsing</title>
3368!Pdrivers/gpu/drm/i915/i915_cmd_parser.c batch buffer command parser
3369!Idrivers/gpu/drm/i915/i915_cmd_parser.c
3370 </sect2>
3371 <sect2>
3372 <title>Batchbuffer Pools</title>
3373!Pdrivers/gpu/drm/i915/i915_gem_batch_pool.c batch pool
3374!Idrivers/gpu/drm/i915/i915_gem_batch_pool.c
3375 </sect2>
3376 <sect2>
3377 <title>Logical Rings, Logical Ring Contexts and Execlists</title>
3378!Pdrivers/gpu/drm/i915/intel_lrc.c Logical Rings, Logical Ring Contexts and Execlists
3379!Idrivers/gpu/drm/i915/intel_lrc.c
3380 </sect2>
3381 <sect2>
3382 <title>Global GTT views</title>
3383!Pdrivers/gpu/drm/i915/i915_gem_gtt.c Global GTT views
3384!Idrivers/gpu/drm/i915/i915_gem_gtt.c
3385 </sect2>
3386 <sect2>
3387 <title>GTT Fences and Swizzling</title>
3388!Idrivers/gpu/drm/i915/i915_gem_fence.c
3389 <sect3>
3390 <title>Global GTT Fence Handling</title>
3391!Pdrivers/gpu/drm/i915/i915_gem_fence.c fence register handling
3392 </sect3>
3393 <sect3>
3394 <title>Hardware Tiling and Swizzling Details</title>
3395!Pdrivers/gpu/drm/i915/i915_gem_fence.c tiling swizzling details
3396 </sect3>
3397 </sect2>
3398 <sect2>
3399 <title>Object Tiling IOCTLs</title>
3400!Idrivers/gpu/drm/i915/i915_gem_tiling.c
3401!Pdrivers/gpu/drm/i915/i915_gem_tiling.c buffer object tiling
3402 </sect2>
3403 <sect2>
3404 <title>Buffer Object Eviction</title>
3405 <para>
3406 This section documents the interface functions for evicting buffer
3407 objects to make space available in the virtual gpu address spaces.
3408 Note that this is mostly orthogonal to shrinking buffer object
3409 caches, whose goal is to make main memory (shared with the gpu
3410 through the unified memory architecture) available.
3411 </para>
3412!Idrivers/gpu/drm/i915/i915_gem_evict.c
3413 </sect2>
3414 <sect2>
3415 <title>Buffer Object Memory Shrinking</title>
3416 <para>
3417 This section documents the interface functions for shrinking memory
3418 usage of buffer object caches. Shrinking is used to make main memory
3419 available. Note that this is mostly orthogonal to evicting buffer
3420 objects, whose goal is to make space in gpu virtual address
3421 spaces.
3422 </para>
3423!Idrivers/gpu/drm/i915/i915_gem_shrinker.c
3424 </sect2>
3425 </sect1>
3426 <sect1>
3427 <title>GuC</title>
3428 <sect2>
3429 <title>GuC-specific firmware loader</title>
3430!Pdrivers/gpu/drm/i915/intel_guc_loader.c GuC-specific firmware loader
3431!Idrivers/gpu/drm/i915/intel_guc_loader.c
3432 </sect2>
3433 <sect2>
3434 <title>GuC-based command submission</title>
3435!Pdrivers/gpu/drm/i915/i915_guc_submission.c GuC-based command submission
3436!Idrivers/gpu/drm/i915/i915_guc_submission.c
3437 </sect2>
3438 <sect2>
3439 <title>GuC Firmware Layout</title>
3440!Pdrivers/gpu/drm/i915/intel_guc_fwif.h GuC Firmware Layout
3441 </sect2>
3442 </sect1>
3443
3444 <sect1>
3445 <title> Tracing </title>
3446 <para>
3447 This section covers all things related to the tracepoints implemented in
3448 the i915 driver.
3449 </para>
3450 <sect2>
3451 <title> i915_ppgtt_create and i915_ppgtt_release </title>
3452!Pdrivers/gpu/drm/i915/i915_trace.h i915_ppgtt_create and i915_ppgtt_release tracepoints
3453 </sect2>
3454 <sect2>
3455 <title> i915_context_create and i915_context_free </title>
3456!Pdrivers/gpu/drm/i915/i915_trace.h i915_context_create and i915_context_free tracepoints
3457 </sect2>
3458 <sect2>
3459 <title> switch_mm </title>
3460!Pdrivers/gpu/drm/i915/i915_trace.h switch_mm tracepoint
3461 </sect2>
3462 </sect1>
3463
3464 </chapter>
3465!Cdrivers/gpu/drm/i915/i915_irq.c
3466</part>
3467
3468<part id="vga_switcheroo">
3469 <title>vga_switcheroo</title>
3470 <partintro>
3471!Pdrivers/gpu/vga/vga_switcheroo.c Overview
3472 </partintro>
3473
3474 <chapter id="modes_of_use">
3475 <title>Modes of Use</title>
3476 <sect1>
3477 <title>Manual switching and manual power control</title>
3478!Pdrivers/gpu/vga/vga_switcheroo.c Manual switching and manual power control
3479 </sect1>
3480 <sect1>
3481 <title>Driver power control</title>
3482!Pdrivers/gpu/vga/vga_switcheroo.c Driver power control
3483 </sect1>
3484 </chapter>
3485
3486 <chapter id="api">
3487 <title>API</title>
3488 <sect1>
3489 <title>Public functions</title>
3490!Edrivers/gpu/vga/vga_switcheroo.c
3491 </sect1>
3492 <sect1>
3493 <title>Public structures</title>
3494!Finclude/linux/vga_switcheroo.h vga_switcheroo_handler
3495!Finclude/linux/vga_switcheroo.h vga_switcheroo_client_ops
3496 </sect1>
3497 <sect1>
3498 <title>Public constants</title>
3499!Finclude/linux/vga_switcheroo.h vga_switcheroo_handler_flags_t
3500!Finclude/linux/vga_switcheroo.h vga_switcheroo_client_id
3501!Finclude/linux/vga_switcheroo.h vga_switcheroo_state
3502 </sect1>
3503 <sect1>
3504 <title>Private structures</title>
3505!Fdrivers/gpu/vga/vga_switcheroo.c vgasr_priv
3506!Fdrivers/gpu/vga/vga_switcheroo.c vga_switcheroo_client
3507 </sect1>
3508 </chapter>
3509
3510 <chapter id="handlers">
3511 <title>Handlers</title>
3512 <sect1>
3513 <title>apple-gmux Handler</title>
3514!Pdrivers/platform/x86/apple-gmux.c Overview
3515!Pdrivers/platform/x86/apple-gmux.c Interrupt
3516 <sect2>
3517 <title>Graphics mux</title>
3518!Pdrivers/platform/x86/apple-gmux.c Graphics mux
3519 </sect2>
3520 <sect2>
3521 <title>Power control</title>
3522!Pdrivers/platform/x86/apple-gmux.c Power control
3523 </sect2>
3524 <sect2>
3525 <title>Backlight control</title>
3526!Pdrivers/platform/x86/apple-gmux.c Backlight control
3527 </sect2>
3528 <sect2>
3529 <title>Public functions</title>
3530!Iinclude/linux/apple-gmux.h
3531 </sect2>
3532 </sect1>
3533 </chapter>
3534
3535!Cdrivers/gpu/vga/vga_switcheroo.c
3536!Cinclude/linux/vga_switcheroo.h
3537!Cdrivers/platform/x86/apple-gmux.c
3538</part>
3539
3540</book>
diff --git a/Documentation/Makefile.sphinx b/Documentation/Makefile.sphinx
new file mode 100644
index 000000000000..addf32309bc3
--- /dev/null
+++ b/Documentation/Makefile.sphinx
@@ -0,0 +1,63 @@
1# -*- makefile -*-
2# Makefile for Sphinx documentation
3#
4
5# You can set these variables from the command line.
6SPHINXBUILD = sphinx-build
7SPHINXOPTS =
8PAPER =
9BUILDDIR = $(obj)/output
10
11# User-friendly check for sphinx-build
12HAVE_SPHINX := $(shell if which $(SPHINXBUILD) >/dev/null 2>&1; then echo 1; else echo 0; fi)
13
14ifeq ($(HAVE_SPHINX),0)
15
16.DEFAULT:
17 $(warning The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed and in PATH, or set the SPHINXBUILD make variable to point to the full path of the '$(SPHINXBUILD)' executable.)
18 @echo " SKIP Sphinx $@ target."
19
20else # HAVE_SPHINX
21
22# User-friendly check for rst2pdf
23HAVE_RST2PDF := $(shell if python -c "import rst2pdf" >/dev/null 2>&1; then echo 1; else echo 0; fi)
24
25# Internal variables.
26PAPEROPT_a4 = -D latex_paper_size=a4
27PAPEROPT_letter = -D latex_paper_size=letter
28KERNELDOC = $(srctree)/scripts/kernel-doc
29KERNELDOC_CONF = -D kerneldoc_srctree=$(srctree) -D kerneldoc_bin=$(KERNELDOC)
30ALLSPHINXOPTS = -D version=$(KERNELVERSION) -D release=$(KERNELRELEASE) -d $(BUILDDIR)/.doctrees $(KERNELDOC_CONF) $(PAPEROPT_$(PAPER)) -c $(srctree)/$(src) $(SPHINXOPTS) $(srctree)/$(src)
31# the i18n builder cannot share the environment and doctrees with the others
32I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
33
34quiet_cmd_sphinx = SPHINX $@
35 cmd_sphinx = $(SPHINXBUILD) -b $2 $(ALLSPHINXOPTS) $(BUILDDIR)/$2
36
37htmldocs:
38 $(call cmd,sphinx,html)
39
40pdfdocs:
41ifeq ($(HAVE_RST2PDF),0)
42 $(warning The Python 'rst2pdf' module was not found. Make sure you have the module installed to produce PDF output.)
43 @echo " SKIP Sphinx $@ target."
44else # HAVE_RST2PDF
45 $(call cmd,sphinx,pdf)
46endif # HAVE_RST2PDF
47
48epubdocs:
49 $(call cmd,sphinx,epub)
50
51xmldocs:
52 $(call cmd,sphinx,xml)
53
54# no-ops for the Sphinx toolchain
55sgmldocs:
56psdocs:
57mandocs:
58installmandocs:
59
60cleandocs:
61 $(Q)rm -rf $(BUILDDIR)
62
63endif # HAVE_SPHINX
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
index c6938e50e71f..4da60b463995 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -56,6 +56,7 @@ stable kernels.
56| ARM | MMU-500 | #841119,#826419 | N/A |
57| | | | |
58| Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
59| Cavium | ThunderX ITS | #23144 | CAVIUM_ERRATUM_23144 |
60| Cavium | ThunderX GICv3 | #23154 | CAVIUM_ERRATUM_23154 |
61| Cavium | ThunderX Core | #27456 | CAVIUM_ERRATUM_27456 |
62| Cavium | ThunderX SMMUv2 | #27704 | N/A |
diff --git a/Documentation/conf.py b/Documentation/conf.py
new file mode 100644
index 000000000000..6cc41a0555a3
--- /dev/null
+++ b/Documentation/conf.py
@@ -0,0 +1,414 @@
1# -*- coding: utf-8 -*-
2#
3# The Linux Kernel documentation build configuration file, created by
4# sphinx-quickstart on Fri Feb 12 13:51:46 2016.
5#
6# This file is execfile()d with the current directory set to its
7# containing dir.
8#
9# Note that not all possible configuration values are present in this
10# autogenerated file.
11#
12# All configuration values have a default; values that are commented out
13# serve to show the default.
14
15import sys
16import os
17
18# If extensions (or modules to document with autodoc) are in another directory,
19# add these directories to sys.path here. If the directory is relative to the
20# documentation root, use os.path.abspath to make it absolute, like shown here.
21sys.path.insert(0, os.path.abspath('sphinx'))
22
23# -- General configuration ------------------------------------------------
24
25# If your documentation needs a minimal Sphinx version, state it here.
26#needs_sphinx = '1.0'
27
28# Add any Sphinx extension module names here, as strings. They can be
29# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
30# ones.
31extensions = ['kernel-doc']
32
33# Gracefully handle missing rst2pdf.
34try:
35 import rst2pdf
36 extensions += ['rst2pdf.pdfbuilder']
37except ImportError:
38 pass
39
40# Add any paths that contain templates here, relative to this directory.
41templates_path = ['_templates']
42
43# The suffix(es) of source filenames.
44# You can specify multiple suffix as a list of string:
45# source_suffix = ['.rst', '.md']
46source_suffix = '.rst'
47
48# The encoding of source files.
49#source_encoding = 'utf-8-sig'
50
51# The master toctree document.
52master_doc = 'index'
53
54# General information about the project.
55project = 'The Linux Kernel'
56copyright = '2016, The kernel development community'
57author = 'The kernel development community'
58
59# The version info for the project you're documenting, acts as replacement for
60# |version| and |release|, also used in various other places throughout the
61# built documents.
62#
63# In a normal build, version and release are set to KERNELVERSION and
64# KERNELRELEASE, respectively, from the Makefile via Sphinx command line
65# arguments.
66#
67# The following code tries to extract the information by reading the Makefile,
68# when Sphinx is run directly (e.g. by Read the Docs).
69try:
70 makefile_version = None
71 makefile_patchlevel = None
72 for line in open('../Makefile'):
73 key, val = [x.strip() for x in line.split('=', 2)]
74 if key == 'VERSION':
75 makefile_version = val
76 elif key == 'PATCHLEVEL':
77 makefile_patchlevel = val
78 if makefile_version and makefile_patchlevel:
79 break
80except:
81 pass
82finally:
83 if makefile_version and makefile_patchlevel:
84 version = release = makefile_version + '.' + makefile_patchlevel
85 else:
86 sys.stderr.write('Warning: Could not extract kernel version\n')
87 version = release = "unknown version"
88
89# The language for content autogenerated by Sphinx. Refer to documentation
90# for a list of supported languages.
91#
92# This is also used if you do content translation via gettext catalogs.
93# Usually you set "language" from the command line for these cases.
94language = None
95
96# There are two options for replacing |today|: either, you set today to some
97# non-false value, then it is used:
98#today = ''
99# Else, today_fmt is used as the format for a strftime call.
100#today_fmt = '%B %d, %Y'
101
102# List of patterns, relative to source directory, that match files and
103# directories to ignore when looking for source files.
104exclude_patterns = ['output']
105
106# The reST default role (used for this markup: `text`) to use for all
107# documents.
108#default_role = None
109
110# If true, '()' will be appended to :func: etc. cross-reference text.
111#add_function_parentheses = True
112
113# If true, the current module name will be prepended to all description
114# unit titles (such as .. function::).
115#add_module_names = True
116
117# If true, sectionauthor and moduleauthor directives will be shown in the
118# output. They are ignored by default.
119#show_authors = False
120
121# The name of the Pygments (syntax highlighting) style to use.
122pygments_style = 'sphinx'
123
124# A list of ignored prefixes for module index sorting.
125#modindex_common_prefix = []
126
127# If true, keep warnings as "system message" paragraphs in the built documents.
128#keep_warnings = False
129
130# If true, `todo` and `todoList` produce output, else they produce nothing.
131todo_include_todos = False
132
133primary_domain = 'C'
134highlight_language = 'C'
135
136# -- Options for HTML output ----------------------------------------------
137
138# The theme to use for HTML and HTML Help pages. See the documentation for
139# a list of builtin themes.
140
141# The Read the Docs theme is available from
142# - https://github.com/snide/sphinx_rtd_theme
143# - https://pypi.python.org/pypi/sphinx_rtd_theme
144# - python-sphinx-rtd-theme package (on Debian)
145try:
146 import sphinx_rtd_theme
147 html_theme = 'sphinx_rtd_theme'
148 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
149except ImportError:
150 sys.stderr.write('Warning: The Sphinx \'sphinx_rtd_theme\' HTML theme was not found. Make sure you have the theme installed to produce pretty HTML output. Falling back to the default theme.\n')
151
152# Theme options are theme-specific and customize the look and feel of a theme
153# further. For a list of options available for each theme, see the
154# documentation.
155#html_theme_options = {}
156
157# Add any paths that contain custom themes here, relative to this directory.
158#html_theme_path = []
159
160# The name for this set of Sphinx documents. If None, it defaults to
161# "<project> v<release> documentation".
162#html_title = None
163
164# A shorter title for the navigation bar. Default is the same as html_title.
165#html_short_title = None
166
167# The name of an image file (relative to this directory) to place at the top
168# of the sidebar.
169#html_logo = None
170
171# The name of an image file (within the static path) to use as favicon of the
172# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
173# pixels large.
174#html_favicon = None
175
176# Add any paths that contain custom static files (such as style sheets) here,
177# relative to this directory. They are copied after the builtin static files,
178# so a file named "default.css" will overwrite the builtin "default.css".
179#html_static_path = ['_static']
180
181# Add any extra paths that contain custom files (such as robots.txt or
182# .htaccess) here, relative to this directory. These files are copied
183# directly to the root of the documentation.
184#html_extra_path = []
185
186# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
187# using the given strftime format.
188#html_last_updated_fmt = '%b %d, %Y'
189
190# If true, SmartyPants will be used to convert quotes and dashes to
191# typographically correct entities.
192#html_use_smartypants = True
193
194# Custom sidebar templates, maps document names to template names.
195#html_sidebars = {}
196
197# Additional templates that should be rendered to pages, maps page names to
198# template names.
199#html_additional_pages = {}
200
201# If false, no module index is generated.
202#html_domain_indices = True
203
204# If false, no index is generated.
205#html_use_index = True
206
207# If true, the index is split into individual pages for each letter.
208#html_split_index = False
209
210# If true, links to the reST sources are added to the pages.
211#html_show_sourcelink = True
212
213# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
214#html_show_sphinx = True
215
216# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
217#html_show_copyright = True
218
219# If true, an OpenSearch description file will be output, and all pages will
220# contain a <link> tag referring to it. The value of this option must be the
221# base URL from which the finished HTML is served.
222#html_use_opensearch = ''
223
224# This is the file name suffix for HTML files (e.g. ".xhtml").
225#html_file_suffix = None
226
227# Language to be used for generating the HTML full-text search index.
228# Sphinx supports the following languages:
229# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
230# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
231#html_search_language = 'en'
232
233# A dictionary with options for the search language support, empty by default.
234# Now only 'ja' uses this config value
235#html_search_options = {'type': 'default'}
236
237# The name of a javascript file (relative to the configuration directory) that
238# implements a search results scorer. If empty, the default will be used.
239#html_search_scorer = 'scorer.js'
240
241# Output file base name for HTML help builder.
242htmlhelp_basename = 'TheLinuxKerneldoc'
243
244# -- Options for LaTeX output ---------------------------------------------
245
246latex_elements = {
247# The paper size ('letterpaper' or 'a4paper').
248#'papersize': 'letterpaper',
249
250# The font size ('10pt', '11pt' or '12pt').
251#'pointsize': '10pt',
252
253# Additional stuff for the LaTeX preamble.
254#'preamble': '',
255
256# Latex figure (float) alignment
257#'figure_align': 'htbp',
258}
259
260# Grouping the document tree into LaTeX files. List of tuples
261# (source start file, target name, title,
262# author, documentclass [howto, manual, or own class]).
263latex_documents = [
264 (master_doc, 'TheLinuxKernel.tex', 'The Linux Kernel Documentation',
265 'The kernel development community', 'manual'),
266]
267
268# The name of an image file (relative to this directory) to place at the top of
269# the title page.
270#latex_logo = None
271
272# For "manual" documents, if this is true, then toplevel headings are parts,
273# not chapters.
274#latex_use_parts = False
275
276# If true, show page references after internal links.
277#latex_show_pagerefs = False
278
279# If true, show URL addresses after external links.
280#latex_show_urls = False
281
282# Documents to append as an appendix to all manuals.
283#latex_appendices = []
284
285# If false, no module index is generated.
286#latex_domain_indices = True
287
288
289# -- Options for manual page output ---------------------------------------
290
291# One entry per manual page. List of tuples
292# (source start file, name, description, authors, manual section).
293man_pages = [
294 (master_doc, 'thelinuxkernel', 'The Linux Kernel Documentation',
295 [author], 1)
296]
297
298# If true, show URL addresses after external links.
299#man_show_urls = False
300
301
302# -- Options for Texinfo output -------------------------------------------
303
304# Grouping the document tree into Texinfo files. List of tuples
305# (source start file, target name, title, author,
306# dir menu entry, description, category)
307texinfo_documents = [
308 (master_doc, 'TheLinuxKernel', 'The Linux Kernel Documentation',
309 author, 'TheLinuxKernel', 'One line description of project.',
310 'Miscellaneous'),
311]
312
313# Documents to append as an appendix to all manuals.
314#texinfo_appendices = []
315
316# If false, no module index is generated.
317#texinfo_domain_indices = True
318
319# How to display URL addresses: 'footnote', 'no', or 'inline'.
320#texinfo_show_urls = 'footnote'
321
322# If true, do not generate a @detailmenu in the "Top" node's menu.
323#texinfo_no_detailmenu = False
324
325
326# -- Options for Epub output ----------------------------------------------
327
328# Bibliographic Dublin Core info.
329epub_title = project
330epub_author = author
331epub_publisher = author
332epub_copyright = copyright
333
334# The basename for the epub file. It defaults to the project name.
335#epub_basename = project
336
337# The HTML theme for the epub output. Since the default themes are not
338# optimized for small screen space, using the same theme for HTML and epub
339# output is usually not wise. This defaults to 'epub', a theme designed to save
340# visual space.
341#epub_theme = 'epub'
342
343# The language of the text. It defaults to the language option
344# or 'en' if the language is not set.
345#epub_language = ''
346
347# The scheme of the identifier. Typical schemes are ISBN or URL.
348#epub_scheme = ''
349
350# The unique identifier of the text. This can be a ISBN number
351# or the project homepage.
352#epub_identifier = ''
353
354# A unique identification for the text.
355#epub_uid = ''
356
357# A tuple containing the cover image and cover page html template filenames.
358#epub_cover = ()
359
360# A sequence of (type, uri, title) tuples for the guide element of content.opf.
361#epub_guide = ()
362
363# HTML files that should be inserted before the pages created by sphinx.
364# The format is a list of tuples containing the path and title.
365#epub_pre_files = []
366
367# HTML files that should be inserted after the pages created by sphinx.
368# The format is a list of tuples containing the path and title.
369#epub_post_files = []
370
371# A list of files that should not be packed into the epub file.
372epub_exclude_files = ['search.html']
373
374# The depth of the table of contents in toc.ncx.
375#epub_tocdepth = 3
376
377# Allow duplicate toc entries.
378#epub_tocdup = True
379
380# Choose between 'default' and 'includehidden'.
381#epub_tocscope = 'default'
382
383# Fix unsupported image types using the Pillow.
384#epub_fix_images = False
385
386# Scale large images.
387#epub_max_image_width = 0
388
389# How to display URL addresses: 'footnote', 'no', or 'inline'.
390#epub_show_urls = 'inline'
391
392# If false, no index is generated.
393#epub_use_index = True
394
395#=======
396# rst2pdf
397#
398# Grouping the document tree into PDF files. List of tuples
399# (source start file, target name, title, author, options).
400#
401# See the Sphinx chapter of http://ralsina.me/static/manual.pdf
402#
403# FIXME: Do not add the index file here; the result will be too big. Adding
404# multiple PDF files here actually tries to get the cross-referencing right
405# *between* PDF files.
406pdf_documents = [
407 ('index', u'Kernel', u'Kernel', u'J. Random Bozo'),
408]
409
410# kernel-doc extension configuration for running Sphinx directly (e.g. by Read
411# the Docs). In a normal build, these are supplied from the Makefile via command
412# line arguments.
413kerneldoc_bin = '../scripts/kernel-doc'
414kerneldoc_srctree = '..'
diff --git a/Documentation/devicetree/bindings/display/arm,malidp.txt b/Documentation/devicetree/bindings/display/arm,malidp.txt
new file mode 100644
index 000000000000..2f7870983ef1
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/arm,malidp.txt
@@ -0,0 +1,65 @@
1ARM Mali-DP
2
3The following bindings apply to a family of Display Processors sold as
4licensable IP by ARM Ltd. The bindings describe the Mali DP500, DP550 and
5DP650 processors, which offer multiple composition layers and support for
6rotation and output scaling.
7
8Required properties:
9 - compatible: should be one of
10 "arm,mali-dp500"
11 "arm,mali-dp550"
12 "arm,mali-dp650"
13 depending on the particular implementation present in the hardware
14 - reg: Physical base address and size of the block of registers used by
15 the processor.
16 - interrupts: Interrupt list, as defined for interrupt client nodes in
17   ../interrupt-controller/interrupts.txt.
18 - interrupt-names: name of the engine inside the processor that will
19 use the corresponding interrupt. Should be one of "DE" or "SE".
20 - clocks: A list of phandle + clock-specifier pairs, one for each entry
21 in 'clock-names'
22 - clock-names: A list of clock names. It should contain:
23 - "pclk": for the APB interface clock
24 - "aclk": for the AXI interface clock
25 - "mclk": for the main processor clock
26 - "pxlclk": for the pixel clock feeding the output PLL of the processor.
27 - arm,malidp-output-port-lines: Array of u8 values describing the number
28 of output lines per channel (R, G and B).
29
30Required sub-nodes:
31 - port: The Mali DP connection to an encoder input port. The connection
32 is modelled using the OF graph bindings specified in
33 Documentation/devicetree/bindings/graph.txt
34
35Optional properties:
36 - memory-region: phandle to a node describing memory (see
37 Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt)
38 to be used for the framebuffer; if not present, the framebuffer may
39 be located anywhere in memory.
40
41
42Example:
43
44/ {
45 ...
46
47 dp0: malidp@6f200000 {
48 compatible = "arm,mali-dp650";
49 reg = <0 0x6f200000 0 0x20000>;
50 memory-region = <&display_reserved>;
51 interrupts = <0 168 IRQ_TYPE_LEVEL_HIGH>,
52 <0 168 IRQ_TYPE_LEVEL_HIGH>;
53 interrupt-names = "DE", "SE";
54 clocks = <&oscclk2>, <&fpgaosc0>, <&fpgaosc1>, <&fpgaosc1>;
55 clock-names = "pxlclk", "mclk", "aclk", "pclk";
56 arm,malidp-output-port-lines = /bits/ 8 <8 8 8>;
57 port {
58 dp0_output: endpoint {
59 remote-endpoint = <&tda998x_2_input>;
60 };
61 };
62 };
63
64 ...
65};
diff --git a/Documentation/devicetree/bindings/display/bridge/sii902x.txt b/Documentation/devicetree/bindings/display/bridge/sii902x.txt
new file mode 100644
index 000000000000..56a3e68ccb80
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/sii902x.txt
@@ -0,0 +1,35 @@
1sii902x HDMI bridge bindings
2
3Required properties:
4 - compatible: "sil,sii9022"
5 - reg: i2c address of the bridge
6
7Optional properties:
8 - interrupts-extended or interrupt-parent + interrupts: describe
9 the interrupt line used to inform the host about hotplug events.
10 - reset-gpios: OF device-tree gpio specification for RST_N pin.
11
12Optional subnodes:
13 - video input: this subnode can contain a video input port node
14 to connect the bridge to a display controller output (See this
15 documentation [1]).
16
17[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
18
19Example:
20 hdmi-bridge@39 {
21 compatible = "sil,sii9022";
22 reg = <0x39>;
23 reset-gpios = <&pioA 1 0>;
24 ports {
25 #address-cells = <1>;
26 #size-cells = <0>;
27
28 port@0 {
29 reg = <0>;
30 bridge_in: endpoint {
31 remote-endpoint = <&dc_out>;
32 };
33 };
34 };
35 };
diff --git a/Documentation/devicetree/bindings/display/connector/hdmi-connector.txt b/Documentation/devicetree/bindings/display/connector/hdmi-connector.txt
index acd5668b1ce1..508aee461e0d 100644
--- a/Documentation/devicetree/bindings/display/connector/hdmi-connector.txt
+++ b/Documentation/devicetree/bindings/display/connector/hdmi-connector.txt
@@ -8,6 +8,7 @@ Required properties:
 Optional properties:
 - label: a symbolic name for the connector
 - hpd-gpios: HPD GPIO number
+- ddc-i2c-bus: phandle link to the I2C controller used for DDC EDID probing
 
 Required nodes:
 - Video port for HDMI input
diff --git a/Documentation/devicetree/bindings/display/imx/ldb.txt b/Documentation/devicetree/bindings/display/imx/ldb.txt
index 0a175d991b52..a407462c885e 100644
--- a/Documentation/devicetree/bindings/display/imx/ldb.txt
+++ b/Documentation/devicetree/bindings/display/imx/ldb.txt
@@ -62,6 +62,7 @@ Required properties:
   display-timings are used instead.
 
 Optional properties (required if display-timings are used):
+ - ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
  - display-timings : A node that describes the display timings as defined in
    Documentation/devicetree/bindings/display/display-timing.txt.
  - fsl,data-mapping : should be "spwg" or "jeida"
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,hdmi.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,hdmi.txt
new file mode 100644
index 000000000000..7b124242b0c5
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,hdmi.txt
@@ -0,0 +1,148 @@
1Mediatek HDMI Encoder
2=====================
3
4The Mediatek HDMI encoder can generate HDMI 1.4a or MHL 2.0 signals from
5its parallel input.
6
7Required properties:
8- compatible: Should be "mediatek,<chip>-hdmi".
9- reg: Physical base address and length of the controller's registers
10- interrupts: The interrupt signal from the function block.
11- clocks: device clocks
12 See Documentation/devicetree/bindings/clock/clock-bindings.txt for details.
13- clock-names: must contain "pixel", "pll", "bclk", and "spdif".
14- phys: phandle link to the HDMI PHY node.
15 See Documentation/devicetree/bindings/phy/phy-bindings.txt for details.
16- phy-names: must contain "hdmi"
17- mediatek,syscon-hdmi: phandle link and register offset to the system
18 configuration registers. For mt8173 this must be offset 0x900 into the
19 MMSYS_CONFIG region: <&mmsys 0x900>.
20- ports: A node containing input and output port nodes with endpoint
21 definitions as documented in Documentation/devicetree/bindings/graph.txt.
22- port@0: The input port in the ports node should be connected to a DPI output
23 port.
24- port@1: The output port in the ports node should be connected to the input
25 port of a connector node that contains a ddc-i2c-bus property, or to the
26 input port of an attached bridge chip, such as a SlimPort transmitter.
27
28HDMI CEC
29========
30
31The HDMI CEC controller handles hotplug detection and CEC communication.
32
33Required properties:
34- compatible: Should be "mediatek,<chip>-cec"
35- reg: Physical base address and length of the controller's registers
36- interrupts: The interrupt signal from the function block.
37- clocks: device clock
38
39HDMI DDC
40========
41
42The HDMI DDC i2c controller is used to interface with the HDMI DDC pins;
43general-purpose I2C devices are handled by Mediatek's ordinary I2C controller.
44
45Required properties:
46- compatible: Should be "mediatek,<chip>-hdmi-ddc"
47- reg: Physical base address and length of the controller's registers
48- clocks: device clock
49- clock-names: Should be "ddc-i2c".
50
51HDMI PHY
52========
53
54The HDMI PHY serializes the HDMI encoder's three channel 10-bit parallel
55output and drives the HDMI pads.
56
57Required properties:
58- compatible: "mediatek,<chip>-hdmi-phy"
59- reg: Physical base address and length of the module's registers
60- clocks: PLL reference clock
61- clock-names: must contain "pll_ref"
62- clock-output-names: must be "hdmitx_dig_cts" on mt8173
63- #phy-cells: must be <0>
64- #clock-cells: must be <0>
65
66Optional properties:
67- mediatek,ibias: TX DRV bias current for <1.65Gbps, defaults to 0xa
68- mediatek,ibias_up: TX DRV bias current for >1.65Gbps, defaults to 0x1c
69
70Example:
71
72cec: cec@10013000 {
73 compatible = "mediatek,mt8173-cec";
74 reg = <0 0x10013000 0 0xbc>;
75 interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_LOW>;
76 clocks = <&infracfg CLK_INFRA_CEC>;
77};
78
79hdmi_phy: hdmi-phy@10209100 {
80 compatible = "mediatek,mt8173-hdmi-phy";
81 reg = <0 0x10209100 0 0x24>;
82 clocks = <&apmixedsys CLK_APMIXED_HDMI_REF>;
83 clock-names = "pll_ref";
84 clock-output-names = "hdmitx_dig_cts";
85 mediatek,ibias = <0xa>;
86 mediatek,ibias_up = <0x1c>;
87 #clock-cells = <0>;
88 #phy-cells = <0>;
89};
90
91hdmi_ddc0: i2c@11012000 {
92 compatible = "mediatek,mt8173-hdmi-ddc";
93 reg = <0 0x11012000 0 0x1c>;
94 interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_LOW>;
95 clocks = <&pericfg CLK_PERI_I2C5>;
96 clock-names = "ddc-i2c";
97};
98
99hdmi0: hdmi@14025000 {
100 compatible = "mediatek,mt8173-hdmi";
101 reg = <0 0x14025000 0 0x400>;
102 interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_LOW>;
103 clocks = <&mmsys CLK_MM_HDMI_PIXEL>,
104 <&mmsys CLK_MM_HDMI_PLLCK>,
105 <&mmsys CLK_MM_HDMI_AUDIO>,
106 <&mmsys CLK_MM_HDMI_SPDIF>;
107 clock-names = "pixel", "pll", "bclk", "spdif";
108 pinctrl-names = "default";
109 pinctrl-0 = <&hdmi_pin>;
110 phys = <&hdmi_phy>;
111 phy-names = "hdmi";
112 mediatek,syscon-hdmi = <&mmsys 0x900>;
113 assigned-clocks = <&topckgen CLK_TOP_HDMI_SEL>;
114 assigned-clock-parents = <&hdmi_phy>;
115
116 ports {
117 #address-cells = <1>;
118 #size-cells = <0>;
119
120 port@0 {
121 reg = <0>;
122
123 hdmi0_in: endpoint {
124 remote-endpoint = <&dpi0_out>;
125 };
126 };
127
128 port@1 {
129 reg = <1>;
130
131 hdmi0_out: endpoint {
132 remote-endpoint = <&hdmi_con_in>;
133 };
134 };
135 };
136};
137
138connector {
139 compatible = "hdmi-connector";
140 type = "a";
141	ddc-i2c-bus = <&hdmi_ddc0>;
142
143 port {
144 hdmi_con_in: endpoint {
145 remote-endpoint = <&hdmi0_out>;
146 };
147 };
148};
diff --git a/Documentation/devicetree/bindings/display/panel/panel-dpi.txt b/Documentation/devicetree/bindings/display/panel/panel-dpi.txt
index 216c894d4f99..b52ac52757df 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-dpi.txt
+++ b/Documentation/devicetree/bindings/display/panel/panel-dpi.txt
@@ -7,6 +7,8 @@ Required properties:
 Optional properties:
 - label: a symbolic name for the panel
 - enable-gpios: panel enable gpio
+- reset-gpios: GPIO to control the RESET pin
+- vcc-supply: phandle of regulator that will be used to enable power to the display
 
 Required nodes:
 - "panel-timing" containing video timings
diff --git a/Documentation/devicetree/bindings/hwmon/ina2xx.txt b/Documentation/devicetree/bindings/hwmon/ina2xx.txt
index 9bcd5e87830d..02af0d94e921 100644
--- a/Documentation/devicetree/bindings/hwmon/ina2xx.txt
+++ b/Documentation/devicetree/bindings/hwmon/ina2xx.txt
@@ -7,6 +7,7 @@ Required properties:
   - "ti,ina220" for ina220
   - "ti,ina226" for ina226
   - "ti,ina230" for ina230
+  - "ti,ina231" for ina231
 - reg: I2C address
 
 Optional properties:
diff --git a/Documentation/devicetree/bindings/i2c/i2c-arb-gpio-challenge.txt b/Documentation/devicetree/bindings/i2c/i2c-arb-gpio-challenge.txt
index bfeabb843941..71191ff0e781 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-arb-gpio-challenge.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-arb-gpio-challenge.txt
@@ -44,8 +44,8 @@ Required properties:
 - our-claim-gpio: The GPIO that we use to claim the bus.
 - their-claim-gpios: The GPIOs that the other sides use to claim the bus.
   Note that some implementations may only support a single other master.
-- Standard I2C mux properties. See mux.txt in this directory.
-- Single I2C child bus node at reg 0. See mux.txt in this directory.
+- Standard I2C mux properties. See i2c-mux.txt in this directory.
+- Single I2C child bus node at reg 0. See i2c-mux.txt in this directory.
 
 Optional properties:
 - slew-delay-us: microseconds to wait for a GPIO to go high. Default is 10 us.
diff --git a/Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.txt b/Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.txt
index 6078aefe7ed4..7ce23ac61308 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.txt
@@ -27,7 +27,8 @@ Required properties:
 - i2c-bus-name: The name of this bus. Also needed as pinctrl-name for the I2C
   parents.
 
-Furthermore, I2C mux properties and child nodes. See mux.txt in this directory.
+Furthermore, I2C mux properties and child nodes. See i2c-mux.txt in this
+directory.
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.txt
index 66709a825541..21da3ecbb370 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.txt
@@ -22,8 +22,8 @@ Required properties:
 - i2c-parent: The phandle of the I2C bus that this multiplexer's master-side
   port is connected to.
 - mux-gpios: list of gpios used to control the muxer
-* Standard I2C mux properties. See mux.txt in this directory.
-* I2C child bus nodes. See mux.txt in this directory.
+* Standard I2C mux properties. See i2c-mux.txt in this directory.
+* I2C child bus nodes. See i2c-mux.txt in this directory.
 
 Optional properties:
 - idle-state: value to set the muxer to when idle. When no value is
@@ -33,7 +33,7 @@ For each i2c child node, an I2C child bus will be created. They will
 be numbered based on their order in the device tree.
 
 Whenever an access is made to a device on a child bus, the value set
-in the revelant node's reg property will be output using the list of
+in the relevant node's reg property will be output using the list of
 GPIOs, the first in the list holding the least-significant value.
 
 If an idle state is defined, using the idle-state (optional) property,
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt
index ae8af1694e95..33119a98e144 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt
@@ -28,9 +28,9 @@ Also required are:
 * Standard pinctrl properties that specify the pin mux state for each child
   bus. See ../pinctrl/pinctrl-bindings.txt.
 
-* Standard I2C mux properties. See mux.txt in this directory.
+* Standard I2C mux properties. See i2c-mux.txt in this directory.
 
-* I2C child bus nodes. See mux.txt in this directory.
+* I2C child bus nodes. See i2c-mux.txt in this directory.
 
 For each named state defined in the pinctrl-names property, an I2C child bus
 will be created. I2C child bus numbers are assigned based on the index into
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-reg.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-reg.txt
index 688783fbe696..de00d7fc450b 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mux-reg.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-mux-reg.txt
@@ -7,8 +7,8 @@ Required properties:
 - compatible: i2c-mux-reg
 - i2c-parent: The phandle of the I2C bus that this multiplexer's master-side
   port is connected to.
-* Standard I2C mux properties. See mux.txt in this directory.
-* I2C child bus nodes. See mux.txt in this directory.
+* Standard I2C mux properties. See i2c-mux.txt in this directory.
+* I2C child bus nodes. See i2c-mux.txt in this directory.
 
 Optional properties:
 - reg: this pair of <offset size> specifies the register to control the mux.
@@ -24,7 +24,7 @@ Optional properties:
   given, it defaults to the last value used.
 
 Whenever an access is made to a device on a child bus, the value set
-in the revelant node's reg property will be output to the register.
+in the relevant node's reg property will be output to the register.
 
 If an idle state is defined, using the idle-state (optional) property,
 whenever an access is not being made to a device on a child bus, the
diff --git a/Documentation/devicetree/bindings/net/marvell-bt-sd8xxx.txt b/Documentation/devicetree/bindings/net/marvell-bt-sd8xxx.txt
index 14aa6cf58201..6a9a63cb0543 100644
--- a/Documentation/devicetree/bindings/net/marvell-bt-sd8xxx.txt
+++ b/Documentation/devicetree/bindings/net/marvell-bt-sd8xxx.txt
@@ -13,10 +13,10 @@ Optional properties:
   initialization. This is an array of 28 values(u8).
 
  - marvell,wakeup-pin: It represents wakeup pin number of the bluetooth chip.
-                       firmware will use the pin to wakeup host system.
+                       firmware will use the pin to wakeup host system (u16).
  - marvell,wakeup-gap-ms: wakeup gap represents wakeup latency of the host
                        platform. The value will be configured to firmware. This
-                       is needed to work chip's sleep feature as expected.
+                       is needed to work chip's sleep feature as expected (u16).
  - interrupt-parent: phandle of the parent interrupt controller
  - interrupts : interrupt pin number to the cpu. Driver will request an irq based
                 on this interrupt number. During system suspend, the irq will be
@@ -50,7 +50,7 @@ calibration data is also available in below example.
                   0x37 0x01 0x1c 0x00 0xff 0xff 0xff 0xff 0x01 0x7f 0x04 0x02
                   0x00 0x00 0xba 0xce 0xc0 0xc6 0x2d 0x00 0x00 0x00 0x00 0x00
                   0x00 0x00 0xf0 0x00>;
-		marvell,wakeup-pin = <0x0d>;
-		marvell,wakeup-gap-ms = <0x64>;
+		marvell,wakeup-pin = /bits/ 16 <0x0d>;
+		marvell,wakeup-gap-ms = /bits/ 16 <0x64>;
 	};
 };
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index a7440bcd67ff..2c2500df0dce 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -255,6 +255,7 @@ synology Synology, Inc.
 SUNW	Sun Microsystems, Inc
 tbs	TBS Technologies
 tcl	Toby Churchill Ltd.
+technexion	TechNexion
 technologic	Technologic Systems
 thine	THine Electronics, Inc.
 ti	Texas Instruments
@@ -269,6 +270,7 @@ tronsmart Tronsmart
 truly	Truly Semiconductors Limited
 tyan	Tyan Computer Corporation
 upisemi	uPI Semiconductor Corp.
+uniwest	United Western Technologies Corp (UniWest)
 urt	United Radiant Technology Corporation
 usi	Universal Scientific Industrial Co., Ltd.
 v3	V3 Semiconductor
diff --git a/Documentation/dmaengine/provider.txt b/Documentation/dmaengine/provider.txt
index 122b7f4876bb..91ce82d5f0c4 100644
--- a/Documentation/dmaengine/provider.txt
+++ b/Documentation/dmaengine/provider.txt
@@ -323,7 +323,7 @@ supported.
    * device_resume
      - Resumes a transfer on the channel
      - This command should operate synchronously on the channel,
-       pausing right away the work of the given channel
+       resuming right away the work of the given channel
 
    * device_terminate_all
      - Aborts all the pending and ongoing transfers on the channel
diff --git a/Documentation/filesystems/devpts.txt b/Documentation/filesystems/devpts.txt
index 30d2fcb32f72..9f94fe276dea 100644
--- a/Documentation/filesystems/devpts.txt
+++ b/Documentation/filesystems/devpts.txt
@@ -1,141 +1,26 @@
+Each mount of the devpts filesystem is now distinct such that ptys
+and their indices allocated in one mount are independent from ptys
+and their indices in all other mounts.
 
-To support containers, we now allow multiple instances of devpts filesystem,
-such that indices of ptys allocated in one instance are independent of indices
-allocated in other instances of devpts.
+All mounts of the devpts filesystem now create a /dev/pts/ptmx node
+with permissions 0000.
 
-To preserve backward compatibility, this support for multiple instances is
-enabled only if:
+To retain backwards compatibility, a ptmx device node (aka any node
+created with "mknod name c 5 2"), when opened, will look for an instance
+of devpts under the name "pts" in the same directory as the ptmx device
+node.
 
-	- CONFIG_DEVPTS_MULTIPLE_INSTANCES=y, and
-	- '-o newinstance' mount option is specified while mounting devpts
-
-IOW, devpts now supports both single-instance and multi-instance semantics.
-
-If CONFIG_DEVPTS_MULTIPLE_INSTANCES=n, there is no change in behavior and
-this referred to as the "legacy" mode. In this mode, the new mount options
-(-o newinstance and -o ptmxmode) will be ignored with a 'bogus option' message
-on console.
-
-If CONFIG_DEVPTS_MULTIPLE_INSTANCES=y and devpts is mounted without the
-'newinstance' option (as in current start-up scripts) the new mount binds
-to the initial kernel mount of devpts. This mode is referred to as the
-'single-instance' mode and the current, single-instance semantics are
-preserved, i.e PTYs are common across the system.
-
-The only difference between this single-instance mode and the legacy mode
-is the presence of new, '/dev/pts/ptmx' node with permissions 0000, which
-can safely be ignored.
-
-If CONFIG_DEVPTS_MULTIPLE_INSTANCES=y and 'newinstance' option is specified,
-the mount is considered to be in the multi-instance mode and a new instance
-of the devpts fs is created. Any ptys created in this instance are independent
-of ptys in other instances of devpts. Like in the single-instance mode, the
-/dev/pts/ptmx node is present. To effectively use the multi-instance mode,
-open of /dev/ptmx must be a redirected to '/dev/pts/ptmx' using a symlink or
-bind-mount.
-
-Eg: A container startup script could do the following:
-
-	$ chmod 0666 /dev/pts/ptmx
-	$ rm /dev/ptmx
-	$ ln -s pts/ptmx /dev/ptmx
-	$ ns_exec -cm /bin/bash
-
-	# We are now in new container
-
-	$ umount /dev/pts
-	$ mount -t devpts -o newinstance lxcpts /dev/pts
-	$ sshd -p 1234
-
-where 'ns_exec -cm /bin/bash' calls clone() with CLONE_NEWNS flag and execs
-/bin/bash in the child process. A pty created by the sshd is not visible in
-the original mount of /dev/pts.
+As an option, instead of placing a /dev/ptmx device node at /dev/ptmx,
+it is possible to place a symlink to /dev/pts/ptmx at /dev/ptmx or
+to bind mount /dev/pts/ptmx to /dev/ptmx. If you opt for using
+the devpts filesystem in this manner, devpts should be mounted with
+ptmxmode=0666, or chmod 0666 /dev/pts/ptmx should be called.
 
 Total count of pty pairs in all instances is limited by sysctls:
 kernel.pty.max = 4096	- global limit
-kernel.pty.reserve = 1024	- reserve for initial instance
+kernel.pty.reserve = 1024	- reserved for filesystems mounted from the initial mount namespace
 kernel.pty.nr	- current count of ptys
 
 Per-instance limit could be set by adding mount option "max=<count>".
 This feature was added in kernel 3.4 together with sysctl kernel.pty.reserve.
 In kernels older than 3.4 sysctl kernel.pty.max works as per-instance limit.
-
-User-space changes
-------------------
-
-In multi-instance mode (i.e '-o newinstance' mount option is specified at least
-once), following user-space issues should be noted.
-
-1. If -o newinstance mount option is never used, /dev/pts/ptmx can be ignored
-   and no change is needed to system-startup scripts.
-
-2. To effectively use multi-instance mode (i.e -o newinstance is specified)
-   administrators or startup scripts should "redirect" open of /dev/ptmx to
-   /dev/pts/ptmx using either a bind mount or symlink.
-
-	$ mount -t devpts -o newinstance devpts /dev/pts
-
-   followed by either
-
-	$ rm /dev/ptmx
-	$ ln -s pts/ptmx /dev/ptmx
-	$ chmod 666 /dev/pts/ptmx
-   or
-	$ mount -o bind /dev/pts/ptmx /dev/ptmx
-
-3. The '/dev/ptmx -> pts/ptmx' symlink is the preferred method since it
-   enables better error-reporting and treats both single-instance and
-   multi-instance mounts similarly.
-
-   But this method requires that system-startup scripts set the mode of
-   /dev/pts/ptmx correctly (default mode is 0000). The scripts can set the
-   mode by, either
-
-   - adding ptmxmode mount option to devpts entry in /etc/fstab, or
-   - using 'chmod 0666 /dev/pts/ptmx'
-
-4. If multi-instance mode mount is needed for containers, but the system
-   startup scripts have not yet been updated, container-startup scripts
-   should bind mount /dev/ptmx to /dev/pts/ptmx to avoid breaking single-
-   instance mounts.
-
-   Or, in general, container-startup scripts should use:
-
-	mount -t devpts -o newinstance -o ptmxmode=0666 devpts /dev/pts
-	if [ ! -L /dev/ptmx ]; then
-		mount -o bind /dev/pts/ptmx /dev/ptmx
-	fi
-
-   When all devpts mounts are multi-instance, /dev/ptmx can permanently be
-   a symlink to pts/ptmx and the bind mount can be ignored.
-
-5. A multi-instance mount that is not accompanied by the /dev/ptmx to
-   /dev/pts/ptmx redirection would result in an unusable/unreachable pty.
-
-	mount -t devpts -o newinstance lxcpts /dev/pts
-
-   immediately followed by:
-
-	open("/dev/ptmx")
-
-   would create a pty, say /dev/pts/7, in the initial kernel mount.
-   But /dev/pts/7 would be invisible in the new mount.
-
-6. The permissions for /dev/pts/ptmx node should be specified when mounting
-   /dev/pts, using the '-o ptmxmode=%o' mount option (default is 0000).
-
-	mount -t devpts -o newinstance -o ptmxmode=0644 devpts /dev/pts
-
-   The permissions can be later be changed as usual with 'chmod'.
-
-	chmod 666 /dev/pts/ptmx
-
-7. A mount of devpts without the 'newinstance' option results in binding to
-   initial kernel mount. This behavior while preserving legacy semantics,
-   does not provide strict isolation in a container environment. i.e by
-   mounting devpts without the 'newinstance' option, a container could
-   get visibility into the 'host' or root container's devpts.
-
-   To workaround this and have strict isolation, all mounts of devpts,
-   including the mount in the root container, should use the newinstance
-   option.
diff --git a/Documentation/gpu/drm-internals.rst b/Documentation/gpu/drm-internals.rst
new file mode 100644
index 000000000000..4f7176576feb
--- /dev/null
+++ b/Documentation/gpu/drm-internals.rst
@@ -0,0 +1,378 @@
1=============
2DRM Internals
3=============
4
5This chapter documents DRM internals relevant to driver authors and
6developers working to add support for the latest features to existing
7drivers.
8
9First, we go over some typical driver initialization requirements, like
10setting up command buffers, creating an initial output configuration,
11and initializing core services. Subsequent sections cover core internals
12in more detail, providing implementation notes and examples.
13
14The DRM layer provides several services to graphics drivers, many of
15them driven by the application interfaces it provides through libdrm,
16the library that wraps most of the DRM ioctls. These include vblank
17event handling, memory management, output management, framebuffer
18management, command submission & fencing, suspend/resume support, and
19DMA services.
20
21Driver Initialization
22=====================
23
24At the core of every DRM driver is a :c:type:`struct drm_driver
25<drm_driver>` structure. Drivers typically statically initialize
26a drm_driver structure, and then pass it to
27:c:func:`drm_dev_alloc()` to allocate a device instance. After the
28device instance is fully initialized it can be registered (which makes
29it accessible from userspace) using :c:func:`drm_dev_register()`.
30
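As a minimal sketch of that flow (the ``foo_*`` names and the choice of a
platform device are illustrative assumptions, not taken from any real
driver)::

    #include <drm/drmP.h>
    #include <linux/platform_device.h>

    static struct drm_driver foo_driver = {
            .driver_features = DRIVER_GEM | DRIVER_MODESET,
            .name  = "foo",
            .desc  = "Foo display controller",
            .date  = "20160101",
            .major = 1,
            .minor = 0,
    };

    static int foo_probe(struct platform_device *pdev)
    {
            struct drm_device *ddev;
            int ret;

            /* Allocate and initialize the device instance. */
            ddev = drm_dev_alloc(&foo_driver, &pdev->dev);
            if (!ddev)
                    return -ENOMEM;

            /* ... hardware and mode setting initialization ... */

            /* Make the device accessible from userspace. */
            ret = drm_dev_register(ddev, 0);
            if (ret)
                    drm_dev_unref(ddev);
            return ret;
    }
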
31The :c:type:`struct drm_driver <drm_driver>` structure
32contains static information that describes the driver and features it
33supports, and pointers to methods that the DRM core will call to
34implement the DRM API. We will first go through the :c:type:`struct
35drm_driver <drm_driver>` static information fields, and will
36then describe individual operations in detail as they get used in later
37sections.
38
39Driver Information
40------------------
41
42Driver Features
43~~~~~~~~~~~~~~~
44
45Drivers inform the DRM core about their requirements and supported
46features by setting appropriate flags in the driver_features field.
47Since those flags influence the DRM core behaviour from registration
48time onwards, most of them must be set before registering the :c:type:`struct
49drm_driver <drm_driver>` instance.
50
51u32 driver_features;
52
53DRIVER_USE_AGP
54 Driver uses AGP interface, the DRM core will manage AGP resources.
55
56DRIVER_REQUIRE_AGP
57 Driver needs AGP interface to function. AGP initialization failure
58 will become a fatal error.
59
60DRIVER_PCI_DMA
61 Driver is capable of PCI DMA, mapping of PCI DMA buffers to
62 userspace will be enabled. Deprecated.
63
64DRIVER_SG
65 Driver can perform scatter/gather DMA, allocation and mapping of
66 scatter/gather buffers will be enabled. Deprecated.
67
68DRIVER_HAVE_DMA
69 Driver supports DMA, the userspace DMA API will be supported.
70 Deprecated.
71
72DRIVER_HAVE_IRQ; DRIVER_IRQ_SHARED
73 DRIVER_HAVE_IRQ indicates whether the driver has an IRQ handler
74 managed by the DRM Core. The core will support simple IRQ handler
75 installation when the flag is set. The installation process is
76 described in the IRQ Registration section below.
77
78 DRIVER_IRQ_SHARED indicates whether the device & handler support
79 shared IRQs (note that this is required of PCI drivers).
80
81DRIVER_GEM
82 Driver uses the GEM memory manager.
83
84DRIVER_MODESET
85 Driver supports mode setting interfaces (KMS).
86
87DRIVER_PRIME
88 Driver implements DRM PRIME buffer sharing.
89
90DRIVER_RENDER
91 Driver supports dedicated render nodes.
92
93DRIVER_ATOMIC
94 Driver supports atomic properties. In this case the driver must
95 implement appropriate obj->atomic_get_property() vfuncs for any
96 modeset objects with driver specific properties.
97
98Major, Minor and Patchlevel
99~~~~~~~~~~~~~~~~~~~~~~~~~~~
100
101int major; int minor; int patchlevel;
102The DRM core identifies driver versions by a major, minor and patch
103level triplet. The information is printed to the kernel log at
104initialization time and passed to userspace through the
105DRM_IOCTL_VERSION ioctl.
106
107The major and minor numbers are also used to verify the requested driver
108API version passed to DRM_IOCTL_SET_VERSION. When the driver API
109changes between minor versions, applications can call
110DRM_IOCTL_SET_VERSION to select a specific version of the API. If the
111requested major isn't equal to the driver major, or the requested minor
112is larger than the driver minor, the DRM_IOCTL_SET_VERSION call will
113return an error. Otherwise the driver's set_version() method will be
114called with the requested version.
115
116Name, Description and Date
117~~~~~~~~~~~~~~~~~~~~~~~~~~
118
119char \*name; char \*desc; char \*date;
120The driver name is printed to the kernel log at initialization time,
121used for IRQ registration and passed to userspace through
122DRM_IOCTL_VERSION.
123
124The driver description is a purely informative string passed to
125userspace through the DRM_IOCTL_VERSION ioctl and otherwise unused by
126the kernel.
127
128The driver date, formatted as YYYYMMDD, is meant to identify the date of
129the latest modification to the driver. However, as most drivers fail to
130update it, its value is mostly useless. The DRM core prints it to the
131kernel log at initialization time and passes it to userspace through the
132DRM_IOCTL_VERSION ioctl.
133
134Device Instance and Driver Handling
135-----------------------------------
136
137.. kernel-doc:: drivers/gpu/drm/drm_drv.c
138 :doc: driver instance overview
139
140.. kernel-doc:: drivers/gpu/drm/drm_drv.c
141 :export:
142
143Driver Load
144-----------
145
146IRQ Registration
147~~~~~~~~~~~~~~~~
148
149The DRM core tries to facilitate IRQ handler registration and
150unregistration by providing :c:func:`drm_irq_install()` and
151:c:func:`drm_irq_uninstall()` functions. Those functions only
152support a single interrupt per device, devices that use more than one
153IRQs need to be handled manually.
154
155Managed IRQ Registration
156''''''''''''''''''''''''
157
158:c:func:`drm_irq_install()` starts by calling the irq_preinstall
159driver operation. The operation is optional and must make sure that the
160interrupt will not get fired by clearing all pending interrupt flags or
161disabling the interrupt.
162
163The passed-in IRQ will then be requested by a call to
164:c:func:`request_irq()`. If the DRIVER_IRQ_SHARED driver feature
165flag is set, a shared (IRQF_SHARED) IRQ handler will be requested.
166
167The IRQ handler function must be provided as the mandatory irq_handler
168driver operation. It will get passed directly to
169:c:func:`request_irq()` and thus has the same prototype as all IRQ
170handlers. It will get called with a pointer to the DRM device as the
171second argument.
172
173Finally the function calls the optional irq_postinstall driver
174operation. The operation usually enables interrupts (excluding the
175vblank interrupt, which is enabled separately), but drivers may choose
176to enable/disable interrupts at a different time.
177
178:c:func:`drm_irq_uninstall()` is similarly used to uninstall an
179IRQ handler. It starts by waking up all processes waiting on a vblank
180interrupt to make sure they don't hang, and then calls the optional
181irq_uninstall driver operation. The operation must disable all hardware
182interrupts. Finally the function frees the IRQ by calling
183:c:func:`free_irq()`.
184
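A minimal sketch of the managed path (the ``foo_*`` names and the platform
device are assumptions for illustration; the driver is assumed to have set
DRIVER_HAVE_IRQ and provided an irq_handler operation)::

    static int foo_enable_irq(struct drm_device *ddev,
                              struct platform_device *pdev)
    {
            /* Runs irq_preinstall, request_irq() and irq_postinstall. */
            return drm_irq_install(ddev, platform_get_irq(pdev, 0));
    }

    /* ... and on teardown: */
    drm_irq_uninstall(ddev);
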
185Manual IRQ Registration
186'''''''''''''''''''''''
187
188Drivers that require multiple interrupt handlers can't use the managed
189IRQ registration functions. In that case IRQs must be registered and
190unregistered manually (usually with the :c:func:`request_irq()` and
191:c:func:`free_irq()` functions, or their devm_\* equivalent).
192
193When manually registering IRQs, drivers must not set the
194DRIVER_HAVE_IRQ driver feature flag, and must not provide the
195irq_handler driver operation. They must set the :c:type:`struct
196drm_device <drm_device>` irq_enabled field to 1 upon
197registration of the IRQs, and clear it to 0 after unregistering the
198IRQs.
199
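A sketch of the manual path for a device with two interrupts (all ``foo_*``
names are hypothetical; a real driver would dispatch per engine)::

    static irqreturn_t foo_irq_handler(int irq, void *arg)
    {
            struct drm_device *ddev = arg;

            /* ... acknowledge and handle the interrupt ... */
            return IRQ_HANDLED;
    }

    static int foo_request_irqs(struct drm_device *ddev,
                                struct platform_device *pdev)
    {
            int irq_de = platform_get_irq_byname(pdev, "DE");
            int irq_se = platform_get_irq_byname(pdev, "SE");
            int ret;

            if (irq_de < 0 || irq_se < 0)
                    return -ENODEV;

            ret = devm_request_irq(ddev->dev, irq_de, foo_irq_handler,
                                   0, "foo-de", ddev);
            if (ret)
                    return ret;

            ret = devm_request_irq(ddev->dev, irq_se, foo_irq_handler,
                                   0, "foo-se", ddev);
            if (ret)
                    return ret;

            /* DRIVER_HAVE_IRQ must not be set when doing this manually. */
            ddev->irq_enabled = true;
            return 0;
    }
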
200Memory Manager Initialization
201~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
202
203Every DRM driver requires a memory manager which must be initialized at
204load time. DRM currently contains two memory managers, the Translation
205Table Manager (TTM) and the Graphics Execution Manager (GEM). This
206document describes the use of the GEM memory manager only. See the DRM
207memory management documentation for details on TTM.
208
209Miscellaneous Device Configuration
210~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
211
212Another task that may be necessary for PCI devices during configuration
213is mapping the video BIOS. On many devices, the VBIOS describes device
214configuration, LCD panel timings (if any), and contains flags indicating
215device state. Mapping the BIOS can be done using the pci_map_rom()
216call, a convenience function that takes care of mapping the actual ROM,
217whether it has been shadowed into memory (typically at address 0xc0000)
218or exists on the PCI device in the ROM BAR. Note that after the ROM has
219been mapped and any necessary information has been extracted, it should
220be unmapped; on many devices, the ROM address decoder is shared with
221other BARs, so leaving it mapped could cause undesired behaviour like
222hangs or memory corruption.
223
224Bus-specific Device Registration and PCI Support
225------------------------------------------------
226
227A number of functions are provided to help with device registration. The
228functions deal with PCI and platform devices respectively and are only
229provided for historical reasons. These are all deprecated and shouldn't
230be used in new drivers. Besides that there are a few helpers for PCI
231drivers.
232
233.. kernel-doc:: drivers/gpu/drm/drm_pci.c
234 :export:
235
236.. kernel-doc:: drivers/gpu/drm/drm_platform.c
237 :export:
238
239Open/Close, File Operations and IOCTLs
240======================================
241
242Open and Close
243--------------
244
245int (\*firstopen) (struct drm_device \*); void (\*lastclose) (struct
246drm_device \*); int (\*open) (struct drm_device \*, struct drm_file
247\*); void (\*preclose) (struct drm_device \*, struct drm_file \*);
248void (\*postclose) (struct drm_device \*, struct drm_file \*);
249 Open and close handlers. None of those methods are mandatory.
250
251The firstopen method is called by the DRM core for legacy UMS (User Mode
252Setting) drivers only when an application opens a device that has no
253other opened file handle. UMS drivers can implement it to acquire device
254resources. KMS drivers can't use the method and must acquire resources
255in the load method instead.
256
257Similarly the lastclose method is called when the last application
258holding a file handle opened on the device closes it, for both UMS and
259KMS drivers. Additionally, the method is also called at module unload
260time or, for hot-pluggable devices, when the device is unplugged. The
261firstopen and lastclose calls can thus be unbalanced.
262
263The open method is called every time the device is opened by an
264application. Drivers can allocate per-file private data in this method
265and store it in the :c:type:`struct drm_file
266<drm_file>` driver_priv field. Note that the open method is
267called before firstopen.
268
269The close operation is split into preclose and postclose methods.
270Drivers must stop and cleanup all per-file operations in the preclose
271method. For instance pending vertical blanking and page flip events must
272be cancelled. No per-file operation is allowed on the file handle after
273returning from the preclose method.
274
275Finally the postclose method is called as the last step of the close
276operation, right before calling the lastclose method if no other open
277file handle exists for the device. Drivers that have allocated per-file
278private data in the open method should free it here.
279
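A minimal sketch of this open/postclose pairing (the ``foo_*`` names are
hypothetical)::

    struct foo_file_priv {
            int placeholder;        /* per-file driver state lives here */
    };

    static int foo_open(struct drm_device *dev, struct drm_file *file)
    {
            struct foo_file_priv *fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);

            if (!fpriv)
                    return -ENOMEM;
            file->driver_priv = fpriv;
            return 0;
    }

    static void foo_postclose(struct drm_device *dev, struct drm_file *file)
    {
            kfree(file->driver_priv);
    }
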
280The lastclose method should restore CRTC and plane properties to default
281value, so that a subsequent open of the device will not inherit state
282from the previous user. It can also be used to execute delayed power
283switching state changes, e.g. in conjunction with the vga_switcheroo
284infrastructure (see the vga_switcheroo documentation). Beyond that KMS drivers should not do any
285further cleanup. Only legacy UMS drivers might need to clean up device
286state so that the vga console or an independent fbdev driver could take
287over.
288
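For a KMS driver with fbdev emulation, lastclose often just restores the
fbdev configuration; a sketch, assuming the driver keeps a drm_fb_helper in
its private structure (``foo_*`` names hypothetical)::

    static void foo_lastclose(struct drm_device *dev)
    {
            struct foo_private *priv = dev->dev_private;

            /* Hand the display back to the fbdev emulation. */
            drm_fb_helper_restore_fbdev_mode_unlocked(&priv->fb_helper);
    }
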
289File Operations
290---------------
291
292.. kernel-doc:: drivers/gpu/drm/drm_fops.c
293 :doc: file operations
294
295.. kernel-doc:: drivers/gpu/drm/drm_fops.c
296 :export:
297
298IOCTLs
299------
300
301struct drm_ioctl_desc \*ioctls; int num_ioctls;
302 Driver-specific ioctls descriptors table.
303
304Driver-specific ioctl numbers start at DRM_COMMAND_BASE. The ioctl
305descriptor table is indexed by the ioctl number offset from the base
306value. Drivers can use the DRM_IOCTL_DEF_DRV() macro to initialize
307the table entries.
308
309::
310
311 DRM_IOCTL_DEF_DRV(ioctl, func, flags)
312
313``ioctl`` is the ioctl name. Drivers must define the DRM_##ioctl and
314DRM_IOCTL_##ioctl macros to the ioctl number offset from
315DRM_COMMAND_BASE and the ioctl number respectively. The first macro is
316private to the device while the second must be exposed to userspace in a
317public header.
318
319``func`` is a pointer to the ioctl handler function compatible with the
320``drm_ioctl_t`` type.
321
322::
323
324 typedef int drm_ioctl_t(struct drm_device *dev, void *data,
325 struct drm_file *file_priv);
326
327``flags`` is a bitmask combination of the following values. It restricts
328how the ioctl is allowed to be called.
329
330- DRM_AUTH - Only authenticated callers allowed
331
332- DRM_MASTER - The ioctl can only be called on the master file handle
333
334- DRM_ROOT_ONLY - Only callers with the SYSADMIN capability allowed
335
336- DRM_CONTROL_ALLOW - The ioctl can only be called on a control
337 device
338
339- DRM_UNLOCKED - The ioctl handler will be called without locking the
340 DRM global mutex. This is the enforced default for KMS drivers (i.e.
341 using the DRIVER_MODESET flag) and hence shouldn't be used any more
342 for new drivers.
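
As an illustration, a hypothetical foo driver could define a single
driver-private ioctl as follows (the FOO names, the handler and the
argument structure are all made up for this sketch):

::

    #define DRM_FOO_FROB        0x00
    #define DRM_IOCTL_FOO_FROB  DRM_IOWR(DRM_COMMAND_BASE + DRM_FOO_FROB, \
                                         struct drm_foo_frob)

    static const struct drm_ioctl_desc foo_ioctls[] = {
        DRM_IOCTL_DEF_DRV(FOO_FROB, foo_frob_ioctl, DRM_AUTH),
    };

The driver then points its :c:type:`struct drm_driver <drm_driver>` ioctls
field at foo_ioctls and sets num_ioctls to ARRAY_SIZE(foo_ioctls).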
343
344.. kernel-doc:: drivers/gpu/drm/drm_ioctl.c
345 :export:
346
347Legacy Support Code
348===================
349
350This section very briefly covers some of the old legacy support code,
351which is only used by old DRM drivers that have done a so-called
352shadow-attach to the underlying device instead of registering as a real
353driver. This also includes some of the old generic buffer management and
354command submission code. Do not use any of this in new and modern
355drivers.
356
357Legacy Suspend/Resume
358---------------------
359
360The DRM core provides some suspend/resume code, but drivers wanting full
361suspend/resume support should provide save() and restore() functions.
362These are called at suspend, hibernate, or resume time, and should
363perform any state save or restore required by your device across suspend
364or hibernate states.
365
366::
367
    int (*suspend) (struct drm_device *, pm_message_t state);
    int (*resume) (struct drm_device *);

368Those are legacy suspend and resume methods which *only* work with the
369legacy shadow-attach driver registration functions. New drivers should
370use the power management interface provided by their bus type (usually
371through the :c:type:`struct device_driver <device_driver>`
372dev_pm_ops) and set these methods to NULL.
373
374Legacy DMA Services
375-------------------
376
377This should cover how DMA mapping etc. is supported by the core. These
378functions are deprecated and should not be used.
diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
new file mode 100644
index 000000000000..0b302fedf1af
--- /dev/null
+++ b/Documentation/gpu/drm-kms-helpers.rst
@@ -0,0 +1,260 @@
1=============================
2Mode Setting Helper Functions
3=============================
4
5The plane, CRTC, encoder and connector functions provided by the drivers
6implement the DRM API. They're called by the DRM core and ioctl handlers
7to handle device state changes and configuration requests. As
8implementing those functions often requires logic not specific to
9drivers, mid-layer helper functions are available to avoid duplicating
10boilerplate code.
11
12The DRM core contains one mid-layer implementation. The mid-layer
13provides implementations of several plane, CRTC, encoder and connector
14functions (called from the top of the mid-layer) that pre-process
15requests and call lower-level functions provided by the driver (at the
16bottom of the mid-layer). For instance, the
17:c:func:`drm_crtc_helper_set_config()` function can be used to
18fill the :c:type:`struct drm_crtc_funcs <drm_crtc_funcs>`
19set_config field. When called, it will split the set_config operation
20into smaller, simpler operations and call the driver to handle them.
21
22To use the mid-layer, drivers call
23:c:func:`drm_crtc_helper_add()`,
24:c:func:`drm_encoder_helper_add()` and
25:c:func:`drm_connector_helper_add()` functions to install their
26mid-layer bottom operations handlers, and fill the :c:type:`struct
27drm_crtc_funcs <drm_crtc_funcs>`, :c:type:`struct
28drm_encoder_funcs <drm_encoder_funcs>` and :c:type:`struct
29drm_connector_funcs <drm_connector_funcs>` structures with
30pointers to the mid-layer top API functions. Installing the mid-layer
31bottom operation handlers is best done right after registering the
32corresponding KMS object.
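
For instance, using hypothetical foo objects and vtables, the bottom
operation handlers would be installed roughly as follows:

::

    /* right after drm_crtc_init(), drm_encoder_init() and
     * drm_connector_init() have registered the objects */
    drm_crtc_helper_add(&foo->crtc, &foo_crtc_helper_funcs);
    drm_encoder_helper_add(&foo->encoder, &foo_encoder_helper_funcs);
    drm_connector_helper_add(&foo->connector, &foo_connector_helper_funcs);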
33
34The mid-layer is not split between CRTC, encoder and connector
35operations. To use it, a driver must provide bottom functions for all
36three KMS entities.
37
38Atomic Modeset Helper Functions Reference
39=========================================
40
41Overview
42--------
43
44.. kernel-doc:: drivers/gpu/drm/drm_atomic_helper.c
45 :doc: overview
46
47Implementing Asynchronous Atomic Commit
48---------------------------------------
49
50.. kernel-doc:: drivers/gpu/drm/drm_atomic_helper.c
51 :doc: implementing nonblocking commit
52
53Atomic State Reset and Initialization
54-------------------------------------
55
56.. kernel-doc:: drivers/gpu/drm/drm_atomic_helper.c
57 :doc: atomic state reset and initialization
58
59.. kernel-doc:: include/drm/drm_atomic_helper.h
60 :internal:
61
62.. kernel-doc:: drivers/gpu/drm/drm_atomic_helper.c
63 :export:
64
65Modeset Helper Reference for Common Vtables
66===========================================
67
68.. kernel-doc:: include/drm/drm_modeset_helper_vtables.h
69 :internal:
70
71.. kernel-doc:: include/drm/drm_modeset_helper_vtables.h
72 :doc: overview
73
74Legacy CRTC/Modeset Helper Functions Reference
75==============================================
76
77.. kernel-doc:: drivers/gpu/drm/drm_crtc_helper.c
78 :export:
79
80.. kernel-doc:: drivers/gpu/drm/drm_crtc_helper.c
81 :doc: overview
82
83Output Probing Helper Functions Reference
84=========================================
85
86.. kernel-doc:: drivers/gpu/drm/drm_probe_helper.c
87 :doc: output probing helper overview
88
89.. kernel-doc:: drivers/gpu/drm/drm_probe_helper.c
90 :export:
91
92fbdev Helper Functions Reference
93================================
94
95.. kernel-doc:: drivers/gpu/drm/drm_fb_helper.c
96 :doc: fbdev helpers
97
98.. kernel-doc:: drivers/gpu/drm/drm_fb_helper.c
99 :export:
100
101.. kernel-doc:: include/drm/drm_fb_helper.h
102 :internal:
103
104Framebuffer CMA Helper Functions Reference
105==========================================
106
107.. kernel-doc:: drivers/gpu/drm/drm_fb_cma_helper.c
108 :doc: framebuffer cma helper functions
109
110.. kernel-doc:: drivers/gpu/drm/drm_fb_cma_helper.c
111 :export:
112
113Display Port Helper Functions Reference
114=======================================
115
116.. kernel-doc:: drivers/gpu/drm/drm_dp_helper.c
117 :doc: dp helpers
118
119.. kernel-doc:: include/drm/drm_dp_helper.h
120 :internal:
121
122.. kernel-doc:: drivers/gpu/drm/drm_dp_helper.c
123 :export:
124
125Display Port Dual Mode Adaptor Helper Functions Reference
126=========================================================
127
128.. kernel-doc:: drivers/gpu/drm/drm_dp_dual_mode_helper.c
129 :doc: dp dual mode helpers
130
131.. kernel-doc:: include/drm/drm_dp_dual_mode_helper.h
132 :internal:
133
134.. kernel-doc:: drivers/gpu/drm/drm_dp_dual_mode_helper.c
135 :export:
136
137Display Port MST Helper Functions Reference
138===========================================
139
140.. kernel-doc:: drivers/gpu/drm/drm_dp_mst_topology.c
141 :doc: dp mst helper
142
143.. kernel-doc:: include/drm/drm_dp_mst_helper.h
144 :internal:
145
146.. kernel-doc:: drivers/gpu/drm/drm_dp_mst_topology.c
147 :export:
148
149MIPI DSI Helper Functions Reference
150===================================
151
152.. kernel-doc:: drivers/gpu/drm/drm_mipi_dsi.c
153 :doc: dsi helpers
154
155.. kernel-doc:: include/drm/drm_mipi_dsi.h
156 :internal:
157
158.. kernel-doc:: drivers/gpu/drm/drm_mipi_dsi.c
159 :export:
160
161EDID Helper Functions Reference
162===============================
163
164.. kernel-doc:: drivers/gpu/drm/drm_edid.c
165 :export:
166
167Rectangle Utilities Reference
168=============================
169
170.. kernel-doc:: include/drm/drm_rect.h
171 :doc: rect utils
172
173.. kernel-doc:: include/drm/drm_rect.h
174 :internal:
175
176.. kernel-doc:: drivers/gpu/drm/drm_rect.c
177 :export:
178
179Flip-work Helper Reference
180==========================
181
182.. kernel-doc:: include/drm/drm_flip_work.h
183 :doc: flip utils
184
185.. kernel-doc:: include/drm/drm_flip_work.h
186 :internal:
187
188.. kernel-doc:: drivers/gpu/drm/drm_flip_work.c
189 :export:
190
191HDMI Infoframes Helper Reference
192================================
193
194Strictly speaking this is not a DRM helper library but is generally
195usable by any driver interfacing with HDMI outputs, like v4l or alsa drivers.
196But it nicely fits into the overall topic of mode setting helper
197libraries and hence is also included here.
198
199.. kernel-doc:: include/linux/hdmi.h
200 :internal:
201
202.. kernel-doc:: drivers/video/hdmi.c
203 :export:
204
205Plane Helper Reference
206======================
207
208.. kernel-doc:: drivers/gpu/drm/drm_plane_helper.c
209 :export:
210
211.. kernel-doc:: drivers/gpu/drm/drm_plane_helper.c
212 :doc: overview
213
214Tile group
215----------
216
217.. kernel-doc:: drivers/gpu/drm/drm_crtc.c
218 :doc: Tile group
219
220Bridges
221=======
222
223Overview
224--------
225
226.. kernel-doc:: drivers/gpu/drm/drm_bridge.c
227 :doc: overview
228
229Default bridge callback sequence
230--------------------------------
231
232.. kernel-doc:: drivers/gpu/drm/drm_bridge.c
233 :doc: bridge callbacks
234
235.. kernel-doc:: drivers/gpu/drm/drm_bridge.c
236 :export:
237
238Panel Helper Reference
239======================
240
241.. kernel-doc:: include/drm/drm_panel.h
242 :internal:
243
244.. kernel-doc:: drivers/gpu/drm/drm_panel.c
245 :export:
246
247.. kernel-doc:: drivers/gpu/drm/drm_panel.c
248 :doc: drm panel
249
250Simple KMS Helper Reference
251===========================
252
253.. kernel-doc:: include/drm/drm_simple_kms_helper.h
254 :internal:
255
256.. kernel-doc:: drivers/gpu/drm/drm_simple_kms_helper.c
257 :export:
258
259.. kernel-doc:: drivers/gpu/drm/drm_simple_kms_helper.c
260 :doc: overview
diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
new file mode 100644
index 000000000000..0e1c80436c1d
--- /dev/null
+++ b/Documentation/gpu/drm-kms.rst
@@ -0,0 +1,656 @@
1=========================
2Kernel Mode Setting (KMS)
3=========================
4
5Mode Setting
6============
7
8Drivers must initialize the mode setting core by calling
9:c:func:`drm_mode_config_init()` on the DRM device. The function
10initializes the :c:type:`struct drm_device <drm_device>`
11mode_config field and never fails. Once done, mode configuration must
12be set up by initializing the following fields.
13
14- int min_width, min_height; int max_width, max_height;
15 Minimum and maximum width and height of the frame buffers in pixel
16 units.
17
18- struct drm_mode_config_funcs \*funcs;
19 Mode setting functions.
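
A minimal sketch of this sequence, with arbitrary limits and a hypothetical
foo_mode_config_funcs table:

::

    drm_mode_config_init(dev);

    /* frame buffer size limits, in pixels */
    dev->mode_config.min_width = 0;
    dev->mode_config.min_height = 0;
    dev->mode_config.max_width = 4096;
    dev->mode_config.max_height = 4096;
    dev->mode_config.funcs = &foo_mode_config_funcs;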
20
21Display Modes Function Reference
22--------------------------------
23
24.. kernel-doc:: include/drm/drm_modes.h
25 :internal:
26
27.. kernel-doc:: drivers/gpu/drm/drm_modes.c
28 :export:
29
30Atomic Mode Setting Function Reference
31--------------------------------------
32
33.. kernel-doc:: drivers/gpu/drm/drm_atomic.c
34 :export:
35
36.. kernel-doc:: drivers/gpu/drm/drm_atomic.c
37 :internal:
38
39Frame Buffer Abstraction
40------------------------
41
42Frame buffers are abstract memory objects that provide a source of
43pixels to scanout to a CRTC. Applications explicitly request the
44creation of frame buffers through the DRM_IOCTL_MODE_ADDFB(2) ioctls
45and receive an opaque handle that can be passed to the KMS CRTC control,
46plane configuration and page flip functions.
47
48Frame buffers rely on the underlying memory manager for low-level memory
49operations. When creating a frame buffer applications pass a memory
50handle (or a list of memory handles for multi-planar formats) through
51the ``drm_mode_fb_cmd2`` argument. For drivers using GEM as their
52userspace buffer management interface this would be a GEM handle.
53Drivers are however free to use their own backing storage object
54handles, e.g. vmwgfx directly exposes special TTM handles to userspace
55and so expects TTM handles in the create ioctl and not GEM handles.
56
57The lifetime of a drm framebuffer is controlled with a reference count;
58drivers can grab additional references with
59:c:func:`drm_framebuffer_reference()` and drop them again with
60:c:func:`drm_framebuffer_unreference()`. For driver-private
61framebuffers for which the last reference is never dropped (e.g. for the
62fbdev framebuffer when the struct :c:type:`struct drm_framebuffer
63<drm_framebuffer>` is embedded into the fbdev helper struct)
64drivers can manually clean up a framebuffer at module unload time with
65:c:func:`drm_framebuffer_unregister_private()`.
66
67DRM Format Handling
68-------------------
69
70.. kernel-doc:: include/drm/drm_fourcc.h
71 :internal:
72
73.. kernel-doc:: drivers/gpu/drm/drm_fourcc.c
74 :export:
75
76Dumb Buffer Objects
77-------------------
78
79The KMS API doesn't standardize backing storage object creation and
80leaves it to driver-specific ioctls. Furthermore actually creating a
81buffer object even for GEM-based drivers is done through a
82driver-specific ioctl - GEM only has a common userspace interface for
83sharing and destroying objects. While not an issue for full-fledged
84graphics stacks that include device-specific userspace components (in
85libdrm for instance), this limitation makes DRM-based early boot graphics
86unnecessarily complex.
87
88Dumb objects partly alleviate the problem by providing a standard API to
89create dumb buffers suitable for scanout, which can then be used to
90create KMS frame buffers.
91
92To support dumb objects drivers must implement the dumb_create,
93dumb_destroy and dumb_map_offset operations.
94
95- int (\*dumb_create)(struct drm_file \*file_priv, struct
96 drm_device \*dev, struct drm_mode_create_dumb \*args);
97 The dumb_create operation creates a driver object (GEM or TTM
98 handle) suitable for scanout based on the width, height and depth
99 from the struct :c:type:`struct drm_mode_create_dumb
100 <drm_mode_create_dumb>` argument. It fills the argument's
101 handle, pitch and size fields with a handle for the newly created
102 object and its line pitch and size in bytes.
103
104- int (\*dumb_destroy)(struct drm_file \*file_priv, struct
105 drm_device \*dev, uint32_t handle);
106 The dumb_destroy operation destroys a dumb object created by
107 dumb_create.
108
109- int (\*dumb_map_offset)(struct drm_file \*file_priv, struct
110 drm_device \*dev, uint32_t handle, uint64_t \*offset);
111 The dumb_map_offset operation associates a fake mmap offset with
112 the object given by the handle and returns it. Drivers must use the
113 :c:func:`drm_gem_create_mmap_offset()` function to associate
114 the fake offset as described in ?.
115
116Note that dumb objects must not be used for GPU acceleration, as has been
117attempted on some ARM embedded platforms. Such drivers really must have
118a hardware-specific ioctl to allocate suitable buffer objects.
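
As an example, a CMA-based driver can typically wire the dumb operations in
its :c:type:`struct drm_driver <drm_driver>` to the generic helpers (a
sketch; whether these particular helpers apply depends on the driver's
memory manager):

::

    static struct drm_driver foo_driver = {
        /* ... */
        .dumb_create = drm_gem_cma_dumb_create,
        .dumb_map_offset = drm_gem_cma_dumb_map_offset,
        .dumb_destroy = drm_gem_dumb_destroy,
        /* ... */
    };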
119
120Output Polling
121--------------
122
123::

    void (*output_poll_changed)(struct drm_device *dev);

124This operation notifies the driver that the status of one or more
125connectors has changed. Drivers that use the fb helper can just call the
126:c:func:`drm_fb_helper_hotplug_event()` function to handle this
127operation.
128
129KMS Initialization and Cleanup
130==============================
131
132A KMS device is abstracted and exposed as a set of planes, CRTCs,
133encoders and connectors. KMS drivers must thus create and initialize all
134those objects at load time after initializing mode setting.
135
136CRTCs (:c:type:`struct drm_crtc <drm_crtc>`)
137--------------------------------------------
138
139A CRTC is an abstraction representing a part of the chip that contains a
140pointer to a scanout buffer. Therefore, the number of CRTCs available
141determines how many independent scanout buffers can be active at any
142given time. The CRTC structure contains several fields to support this:
143a pointer to some video memory (abstracted as a frame buffer object), a
144display mode, and an (x, y) offset into the video memory to support
145panning or configurations where one piece of video memory spans multiple
146CRTCs.
147
148CRTC Initialization
149~~~~~~~~~~~~~~~~~~~
150
151A KMS device must create and register at least one struct
152:c:type:`struct drm_crtc <drm_crtc>` instance. The instance is
153allocated and zeroed by the driver, possibly as part of a larger
154structure, and registered with a call to :c:func:`drm_crtc_init()`
155with a pointer to CRTC functions.
156
157Planes (:c:type:`struct drm_plane <drm_plane>`)
158-----------------------------------------------
159
160A plane represents an image source that can be blended with or overlaid
161on top of a CRTC during the scanout process. Planes are associated with
162a frame buffer to crop a portion of the image memory (source) and
163optionally scale it to a destination size. The result is then blended
164with or overlaid on top of a CRTC.
165
166The DRM core recognizes three types of planes:
167
168- DRM_PLANE_TYPE_PRIMARY represents a "main" plane for a CRTC.
169 Primary planes are the planes operated upon by CRTC modesetting and
170 flipping operations described in the page_flip hook in
171 :c:type:`struct drm_crtc_funcs <drm_crtc_funcs>`.
172- DRM_PLANE_TYPE_CURSOR represents a "cursor" plane for a CRTC.
173 Cursor planes are the planes operated upon by the
174 DRM_IOCTL_MODE_CURSOR and DRM_IOCTL_MODE_CURSOR2 ioctls.
175- DRM_PLANE_TYPE_OVERLAY represents all non-primary, non-cursor
176 planes. Some drivers refer to these types of planes as "sprites"
177 internally.
178
179For compatibility with legacy userspace, only overlay planes are made
180available to userspace by default. Userspace clients may set the
181DRM_CLIENT_CAP_UNIVERSAL_PLANES client capability bit to indicate
182that they wish to receive a universal plane list containing all plane
183types.
184
185Plane Initialization
186~~~~~~~~~~~~~~~~~~~~
187
188To create a plane, a KMS driver allocates and zeroes an instance of
189:c:type:`struct drm_plane <drm_plane>` (possibly as part of a
190larger structure) and registers it with a call to
191:c:func:`drm_universal_plane_init()`. The function takes a
192bitmask of the CRTCs that can be associated with the plane, a pointer to
193the plane functions, a list of supported formats, and the type of
194plane (primary, cursor, or overlay) being initialized.
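
A sketch of primary plane registration (the format list, the funcs table
and the foo structure are hypothetical; recent kernels additionally take a
name argument at the end, passed as NULL here to request a default name):

::

    static const uint32_t foo_formats[] = {
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_ARGB8888,
    };

    ret = drm_universal_plane_init(dev, &foo->primary, 1 << pipe,
                                   &foo_plane_funcs, foo_formats,
                                   ARRAY_SIZE(foo_formats),
                                   DRM_PLANE_TYPE_PRIMARY, NULL);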
195
196Cursor and overlay planes are optional. All drivers should provide one
197primary plane per CRTC (although this requirement may change in the
198future); drivers that do not wish to provide special handling for
199primary planes may make use of the helper functions described in ? to
200create and register a primary plane with standard capabilities.
201
202Encoders (:c:type:`struct drm_encoder <drm_encoder>`)
203-----------------------------------------------------
204
205An encoder takes pixel data from a CRTC and converts it to a format
206suitable for any attached connectors. On some devices, it may be
207possible to have a CRTC send data to more than one encoder. In that
208case, both encoders would receive data from the same scanout buffer,
209resulting in a "cloned" display configuration across the connectors
210attached to each encoder.
211
212Encoder Initialization
213~~~~~~~~~~~~~~~~~~~~~~
214
215As for CRTCs, a KMS driver must create, initialize and register at least
216one :c:type:`struct drm_encoder <drm_encoder>` instance. The
217instance is allocated and zeroed by the driver, possibly as part of a
218larger structure.
219
220Drivers must initialize the :c:type:`struct drm_encoder
221<drm_encoder>` possible_crtcs and possible_clones fields before
222registering the encoder. Both fields are bitmasks of respectively the
223CRTCs that the encoder can be connected to, and sibling encoders
224candidate for cloning.
225
226After being initialized, the encoder must be registered with a call to
227:c:func:`drm_encoder_init()`. The function takes a pointer to the
228encoder functions and an encoder type. Supported types are
229
230- DRM_MODE_ENCODER_DAC for VGA and analog on DVI-I/DVI-A
231- DRM_MODE_ENCODER_TMDS for DVI, HDMI and (embedded) DisplayPort
232- DRM_MODE_ENCODER_LVDS for display panels
233- DRM_MODE_ENCODER_TVDAC for TV output (Composite, S-Video,
234 Component, SCART)
235- DRM_MODE_ENCODER_VIRTUAL for virtual machine displays
236
237Encoders must be attached to a CRTC to be used. DRM drivers leave
238encoders unattached at initialization time. Applications (or the fbdev
239compatibility layer when implemented) are responsible for attaching the
240encoders they want to use to a CRTC.
241
242Connectors (:c:type:`struct drm_connector <drm_connector>`)
243-----------------------------------------------------------
244
245A connector is the final destination for pixel data on a device, and
246usually connects directly to an external display device like a monitor
247or laptop panel. A connector can only be attached to one encoder at a
248time. The connector is also the structure where information about the
249attached display is kept, so it contains fields for display data, EDID
250data, DPMS & connection status, and information about modes supported on
251the attached displays.
252
253Connector Initialization
254~~~~~~~~~~~~~~~~~~~~~~~~
255
256Finally a KMS driver must create, initialize, register and attach at
257least one :c:type:`struct drm_connector <drm_connector>`
258instance. The instance is created as other KMS objects and initialized
259by setting the following fields.
260
261interlace_allowed
262 Whether the connector can handle interlaced modes.
263
264doublescan_allowed
265 Whether the connector can handle doublescan.
266
267display_info
268 Display information is filled from EDID information when a display
269 is detected. For non hot-pluggable displays such as flat panels in
270 embedded systems, the driver should initialize the
271 display_info.width_mm and display_info.height_mm fields with the
272 physical size of the display.
273
274polled
275 Connector polling mode, a combination of
276
277 DRM_CONNECTOR_POLL_HPD
278 The connector generates hotplug events and doesn't need to be
279 periodically polled. The CONNECT and DISCONNECT flags must not
280 be set together with the HPD flag.
281
282 DRM_CONNECTOR_POLL_CONNECT
283 Periodically poll the connector for connection.
284
285 DRM_CONNECTOR_POLL_DISCONNECT
286 Periodically poll the connector for disconnection.
287
288 Set to 0 for connectors that don't support connection status
289 discovery.
290
291The connector is then registered with a call to
292:c:func:`drm_connector_init()` with a pointer to the connector
293functions and a connector type, and exposed through sysfs with a call to
294:c:func:`drm_connector_register()`.
295
296Supported connector types are
297
298- DRM_MODE_CONNECTOR_VGA
299- DRM_MODE_CONNECTOR_DVII
300- DRM_MODE_CONNECTOR_DVID
301- DRM_MODE_CONNECTOR_DVIA
302- DRM_MODE_CONNECTOR_Composite
303- DRM_MODE_CONNECTOR_SVIDEO
304- DRM_MODE_CONNECTOR_LVDS
305- DRM_MODE_CONNECTOR_Component
306- DRM_MODE_CONNECTOR_9PinDIN
307- DRM_MODE_CONNECTOR_DisplayPort
308- DRM_MODE_CONNECTOR_HDMIA
309- DRM_MODE_CONNECTOR_HDMIB
310- DRM_MODE_CONNECTOR_TV
311- DRM_MODE_CONNECTOR_eDP
312- DRM_MODE_CONNECTOR_VIRTUAL
313
314Connectors must be attached to an encoder to be used. For devices that
315map connectors to encoders 1:1, the connector should be attached at
316initialization time with a call to
317:c:func:`drm_mode_connector_attach_encoder()`. The driver must
318also set the :c:type:`struct drm_connector <drm_connector>`
319encoder field to point to the attached encoder.
320
321Finally, drivers must initialize the connector state change detection
322with a call to :c:func:`drm_kms_helper_poll_init()`. If at least
323one connector is pollable but can't generate hotplug interrupts
324(indicated by the DRM_CONNECTOR_POLL_CONNECT and
325DRM_CONNECTOR_POLL_DISCONNECT connector flags), a delayed work will
326automatically be queued to periodically poll for changes. Connectors
327that can generate hotplug interrupts must be marked with the
328DRM_CONNECTOR_POLL_HPD flag instead, and their interrupt handler must
329call :c:func:`drm_helper_hpd_irq_event()`. The function will
330queue a delayed work to check the state of all connectors, but no
331periodic polling will be done.
332
333Connector Operations
334~~~~~~~~~~~~~~~~~~~~
335
336 **Note**
337
338 Unless otherwise stated, all operations are mandatory.
339
340DPMS
341''''
342
343::

    void (*dpms)(struct drm_connector *connector, int mode);

344The DPMS operation sets the power state of a connector. The mode
345argument is one of
346
347- DRM_MODE_DPMS_ON
348
349- DRM_MODE_DPMS_STANDBY
350
351- DRM_MODE_DPMS_SUSPEND
352
353- DRM_MODE_DPMS_OFF
354
355In all but DPMS_ON mode the encoder to which the connector is attached
356should put the display in low-power mode by driving its signals
357appropriately. If more than one connector is attached to the encoder
358care should be taken not to change the power state of other displays as
359a side effect. Low-power mode should be propagated to the encoders and
360CRTCs when all related connectors are put in low-power mode.
361
362Modes
363'''''
364
365::

    int (*fill_modes)(struct drm_connector *connector,
                      uint32_t max_width, uint32_t max_height);

367Fill the mode list with all supported modes for the connector. If the
368``max_width`` and ``max_height`` arguments are non-zero, the
369implementation must ignore all modes wider than ``max_width`` or higher
370than ``max_height``.
371
372In this operation the connector must also fill its display_info
373width_mm and height_mm fields with the physical size of the connected
374display in millimeters. The fields should be set to 0 if the value isn't
375known or is not applicable (for instance for projector devices).
376
377Connection Status
378'''''''''''''''''
379
380The connection status is updated through polling or hotplug events when
381supported (see ?). The status value is reported to userspace through
382ioctls and must not be used inside the driver, as it only gets
383initialized by a call to :c:func:`drm_mode_getconnector()` from
384userspace.
385
386::

    enum drm_connector_status (*detect)(struct drm_connector *connector,
                                        bool force);

388Check to see if anything is attached to the connector. The ``force``
389parameter is set to false whilst polling or to true when checking the
390connector due to user request. ``force`` can be used by the driver to
391avoid expensive, destructive operations during automated probing.
392
393Return connector_status_connected if something is connected to the
394connector, connector_status_disconnected if nothing is connected and
395connector_status_unknown if the connection state isn't known.
396
397Drivers should only return connector_status_connected if the
398connection status has really been probed as connected. Connectors that
399can't detect the connection status, or failed connection status probes,
400should return connector_status_unknown.
401
402Cleanup
403-------
404
405The DRM core manages its objects' lifetime. When an object is not needed
406anymore the core calls its destroy function, which must clean up and
407free every resource allocated for the object. Every
408:c:func:`drm_\*_init()` call must be matched with a corresponding
409:c:func:`drm_\*_cleanup()` call to clean up CRTCs
410(:c:func:`drm_crtc_cleanup()`), planes
411(:c:func:`drm_plane_cleanup()`), encoders
412(:c:func:`drm_encoder_cleanup()`) and connectors
413(:c:func:`drm_connector_cleanup()`). Furthermore, connectors that
414have been added to sysfs must be removed by a call to
415:c:func:`drm_connector_unregister()` before calling
416:c:func:`drm_connector_cleanup()`.
417
418Connector state change detection must be cleaned up with a call to
419:c:func:`drm_kms_helper_poll_fini()`.
420
421Output discovery and initialization example
422-------------------------------------------
423
424::
425
426 void intel_crt_init(struct drm_device *dev)
427 {
428 struct drm_connector *connector;
429 struct intel_output *intel_output;
430
431 intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
432 if (!intel_output)
433 return;
434
435 connector = &intel_output->base;
436 drm_connector_init(dev, &intel_output->base,
437 &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
438
439 drm_encoder_init(dev, &intel_output->enc, &intel_crt_enc_funcs,
440 DRM_MODE_ENCODER_DAC);
441
442 drm_mode_connector_attach_encoder(&intel_output->base,
443 &intel_output->enc);
444
445 /* Set up the DDC bus. */
446 intel_output->ddc_bus = intel_i2c_create(dev, GPIOA, "CRTDDC_A");
447 if (!intel_output->ddc_bus) {
448 dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
449 "failed.\n");
450 return;
451 }
452
453 intel_output->type = INTEL_OUTPUT_ANALOG;
454 connector->interlace_allowed = 0;
455 connector->doublescan_allowed = 0;
456
457 drm_encoder_helper_add(&intel_output->enc, &intel_crt_helper_funcs);
458 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
459
460 drm_connector_register(connector);
461 }
462
463In the example above (taken from the i915 driver), a CRTC, connector and
464encoder combination is created. A device-specific i2c bus is also
465created for fetching EDID data and performing monitor detection. Once
466the process is complete, the new connector is registered with sysfs to
467make its properties available to applications.
468
469KMS API Functions
470-----------------
471
472.. kernel-doc:: drivers/gpu/drm/drm_crtc.c
473 :export:
474
475KMS Data Structures
476-------------------
477
478.. kernel-doc:: include/drm/drm_crtc.h
479 :internal:
480
481KMS Locking
482-----------
483
484.. kernel-doc:: drivers/gpu/drm/drm_modeset_lock.c
485 :doc: kms locking
486
487.. kernel-doc:: include/drm/drm_modeset_lock.h
488 :internal:
489
490.. kernel-doc:: drivers/gpu/drm/drm_modeset_lock.c
491 :export:
492
493KMS Properties
494==============
495
496Drivers may need to expose additional parameters to applications beyond
497those described in the previous sections. KMS supports attaching
498properties to CRTCs, connectors and planes and offers a userspace API to
499list, get and set the property values.
500
501Properties are identified by a name that uniquely defines the property
502purpose, and store an associated value. For all property types except
503blob properties the value is a 64-bit unsigned integer.
504
505KMS differentiates between properties and property instances. Drivers
506first create properties and then create and associate individual
507instances of those properties to objects. A property can be instantiated
508multiple times and associated with different objects. Values are stored
509in property instances, while all other property information is stored in
510the property and shared between all instances of the property.
511
512Every property is created with a type that influences how the KMS core
513handles the property. Supported property types are
514
515DRM_MODE_PROP_RANGE
516 Range properties report their minimum and maximum admissible values.
517 The KMS core verifies that values set by applications fit in that
518 range.
519
520DRM_MODE_PROP_ENUM
521 Enumerated properties take a numerical value that ranges from 0 to
522 the number of enumerated values defined by the property minus one,
523 and associate a free-formed string name to each value. Applications
524 can retrieve the list of defined value-name pairs and use the
525 numerical value to get and set property instance values.
526
527DRM_MODE_PROP_BITMASK
528 Bitmask properties are enumeration properties that additionally
529 restrict all enumerated values to the 0..63 range. Bitmask property
530 instance values combine one or more of the enumerated bits defined
531 by the property.
532
533DRM_MODE_PROP_BLOB
534 Blob properties store a binary blob without any format restriction.
535 The binary blobs are created as KMS standalone objects, and blob
536 property instance values store the ID of their associated blob
537 object.
538
539 Blob properties are only used for the connector EDID property and
540 cannot be created by drivers.
541
542To create a property drivers call one of the following functions
543depending on the property type. All property creation functions take
544property flags and name, as well as type-specific arguments.
545
546- struct drm_property \*drm_property_create_range(struct
547 drm_device \*dev, int flags, const char \*name, uint64_t min,
548 uint64_t max);
549 Create a range property with the given minimum and maximum values.
550
551- struct drm_property \*drm_property_create_enum(struct drm_device
552 \*dev, int flags, const char \*name, const struct
553 drm_prop_enum_list \*props, int num_values);
554 Create an enumerated property. The ``props`` argument points to an
555 array of ``num_values`` value-name pairs.
556
557- struct drm_property \*drm_property_create_bitmask(struct
558 drm_device \*dev, int flags, const char \*name, const struct
559 drm_prop_enum_list \*props, int num_values);
560 Create a bitmask property. The ``props`` argument points to an array
561 of ``num_values`` value-name pairs.
562
563Properties can additionally be created as immutable, in which case they
564will be read-only for applications but can be modified by the driver. To
565create an immutable property drivers must set the
566DRM_MODE_PROP_IMMUTABLE flag at property creation time.
567
568When no array of value-name pairs is readily available at property
569creation time for enumerated or range properties, drivers can create the
570property using the :c:func:`drm_property_create()` function and
571manually add enumeration value-name pairs by calling the
572:c:func:`drm_property_add_enum()` function. Care must be taken to
573properly specify the property type through the ``flags`` argument.
574
575After creating properties drivers can attach property instances to CRTC,
576connector and plane objects by calling the
577:c:func:`drm_object_attach_property()`. The function takes a
578pointer to the target object, a pointer to the previously created
579property and an initial instance value.
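
Putting it together, a sketch of creating and attaching a driver-specific
enumerated property to a connector (the property name and values are made
up for the example):

::

    static const struct drm_prop_enum_list foo_scaling_modes[] = {
        { 0, "none" },
        { 1, "full" },
    };

    struct drm_property *prop;

    prop = drm_property_create_enum(dev, 0, "scaling mode",
                                    foo_scaling_modes,
                                    ARRAY_SIZE(foo_scaling_modes));
    if (!prop)
        return -ENOMEM;

    /* attach an instance to the connector, initial value "none" */
    drm_object_attach_property(&connector->base, prop, 0);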
580
581Existing KMS Properties
582-----------------------
583
584The following table gives a description of the DRM properties exposed by
585various modules/drivers.
586
587.. csv-table::
588 :header-rows: 1
589 :file: kms-properties.csv
590
591Vertical Blanking
592=================
593
594Vertical blanking plays a major role in graphics rendering. To achieve
595tear-free display, users must synchronize page flips and/or rendering to
596vertical blanking. The DRM API offers ioctls to perform page flips
597synchronized to vertical blanking and wait for vertical blanking.
598
599The DRM core handles most of the vertical blanking management logic,
600which involves filtering out spurious interrupts, keeping race-free
601blanking counters, coping with counter wrap-around and resets and
602keeping use counts. It relies on the driver to generate vertical
603blanking interrupts and optionally provide a hardware vertical blanking
604counter. Drivers must implement the following operations.
605
606- int (\*enable_vblank) (struct drm_device \*dev, int crtc); void
607 (\*disable_vblank) (struct drm_device \*dev, int crtc);
608 Enable or disable vertical blanking interrupts for the given CRTC.
609
610- u32 (\*get_vblank_counter) (struct drm_device \*dev, int crtc);
611 Retrieve the value of the vertical blanking counter for the given
612 CRTC. If the hardware maintains a vertical blanking counter its value
613 should be returned. Otherwise drivers can use the
614 :c:func:`drm_vblank_count()` helper function to handle this
615 operation.
616
617Drivers must initialize the vertical blanking handling core with a call
618to :c:func:`drm_vblank_init()` in their load operation.
619
620Vertical blanking interrupts can be enabled by the DRM core or by
621drivers themselves (for instance to handle page flipping operations).
622The DRM core maintains a vertical blanking use count to ensure that the
623interrupts are not disabled while a user still needs them. To increment
624the use count, drivers call :c:func:`drm_vblank_get()`. Upon
625return vertical blanking interrupts are guaranteed to be enabled.
626
627To decrement the use count drivers call
628:c:func:`drm_vblank_put()`. Only when the use count drops to zero
629will the DRM core disable the vertical blanking interrupts after a delay
630by scheduling a timer. The delay is accessible through the
631vblankoffdelay module parameter or the ``drm_vblank_offdelay`` global
632variable, and is expressed in milliseconds. Its default value is 5000 ms.
633Zero means never disable, and a negative value means disable
634immediately. Drivers may override the behaviour by setting the
635:c:type:`struct drm_device <drm_device>`
636vblank_disable_immediate flag, which when set causes vblank interrupts
637to be disabled immediately regardless of the drm_vblank_offdelay
638value. The flag should only be set if there's a properly working
639hardware vblank counter present.
640
641When a vertical blanking interrupt occurs drivers only need to call the
642:c:func:`drm_handle_vblank()` function to account for the
643interrupt.
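
A sketch of such an interrupt handler (the status register accessor and the
interrupt bit are hypothetical, and the example assumes the event concerns
CRTC 0):

::

    static irqreturn_t foo_irq_handler(int irq, void *arg)
    {
        struct drm_device *dev = arg;
        u32 status = foo_read_and_clear_irq_status(dev);

        if (status & FOO_IRQ_VBLANK)
            drm_handle_vblank(dev, 0);

        return status ? IRQ_HANDLED : IRQ_NONE;
    }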
644
645Resources allocated by :c:func:`drm_vblank_init()` must be freed
646with a call to :c:func:`drm_vblank_cleanup()` in the driver unload
647operation handler.
648
649Vertical Blanking and Interrupt Handling Functions Reference
650------------------------------------------------------------
651
652.. kernel-doc:: drivers/gpu/drm/drm_irq.c
653 :export:
654
655.. kernel-doc:: include/drm/drmP.h
656 :functions: drm_crtc_vblank_waitqueue
diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst
new file mode 100644
index 000000000000..59f9822fecd0
--- /dev/null
+++ b/Documentation/gpu/drm-mm.rst
@@ -0,0 +1,454 @@
1=====================
2DRM Memory Management
3=====================
4
5Modern Linux systems require large amounts of graphics memory to store
6frame buffers, textures, vertices and other graphics-related data. Given
7the very dynamic nature of much of that data, managing graphics memory
8efficiently is thus crucial for the graphics stack and plays a central
9role in the DRM infrastructure.
10
11The DRM core includes two memory managers, namely Translation Table Maps
12(TTM) and Graphics Execution Manager (GEM). TTM was the first DRM memory
13manager to be developed and tried to be a one-size-fits-all
14solution. It provides a single userspace API to accommodate the needs of
15all hardware, supporting both Unified Memory Architecture (UMA) devices
16and devices with dedicated video RAM (i.e. most discrete video cards).
17This resulted in a large, complex piece of code that turned out to be
18hard to use for driver development.
19
20GEM started as an Intel-sponsored project in reaction to TTM's
21complexity. Its design philosophy is completely different: instead of
22providing a solution to every graphics memory-related problem, GEM
23identified common code between drivers and created a support library to
24share it. GEM has simpler initialization and execution requirements than
25TTM, but has no video RAM management capabilities and is thus limited to
26UMA devices.
27
28The Translation Table Manager (TTM)
29-----------------------------------
30
31TTM design background and information belongs here.
32
33TTM initialization
34~~~~~~~~~~~~~~~~~~
35
36 **Warning**
37
38 This section is outdated.
39
40Drivers wishing to support TTM must fill out a drm_bo_driver
41structure. The structure contains several fields with function pointers
42for initializing the TTM, allocating and freeing memory, waiting for
43command completion and fence synchronization, and memory migration. See
44the radeon_ttm.c file for an example of usage.
45
46The ttm_global_reference structure is made up of several fields:
47
48::
49
50 struct ttm_global_reference {
51 enum ttm_global_types global_type;
52 size_t size;
53 void *object;
54 int (*init) (struct ttm_global_reference *);
55 void (*release) (struct ttm_global_reference *);
56 };
57
58
59There should be one global reference structure for your memory manager
60as a whole, and there will be others for each object created by the
61memory manager at runtime. Your global TTM should have a type of
62TTM_GLOBAL_TTM_MEM. The size field for the global object should be
63sizeof(struct ttm_mem_global), and the init and release hooks should
64point at your driver-specific init and release routines, which probably
65eventually call ttm_mem_global_init and ttm_mem_global_release,
66respectively.
67
68Once your global TTM accounting structure is set up and initialized by
69calling ttm_global_item_ref() on it, you need to create a buffer
70object TTM to provide a pool for buffer object allocation by clients and
71the kernel itself. The type of this object should be
72TTM_GLOBAL_TTM_BO, and its size should be sizeof(struct
73ttm_bo_global). Again, driver-specific init and release functions may
74be provided, likely eventually calling ttm_bo_global_init() and
75ttm_bo_global_release(), respectively. Also, like the previous
76object, ttm_global_item_ref() is used to create an initial reference
77count for the TTM, which will call your initialization function.
78
79The Graphics Execution Manager (GEM)
80------------------------------------
81
82The GEM design approach has resulted in a memory manager that doesn't
83provide full coverage of all (or even all common) use cases in its
84userspace or kernel API. GEM exposes a set of standard memory-related
85operations to userspace and a set of helper functions to drivers, and
86lets drivers implement hardware-specific operations with their own
87private API.
88
89The GEM userspace API is described in the `GEM - the Graphics Execution
90Manager <http://lwn.net/Articles/283798/>`__ article on LWN. While
91slightly outdated, the document provides a good overview of the GEM API
92principles. Buffer allocation and read and write operations, described
93as part of the common GEM API, are currently implemented using
94driver-specific ioctls.
95
96GEM is data-agnostic. It manages abstract buffer objects without knowing
97what individual buffers contain. APIs that require knowledge of buffer
98contents or purpose, such as buffer allocation or synchronization
99primitives, are thus outside of the scope of GEM and must be implemented
100using driver-specific ioctls.
101
102On a fundamental level, GEM involves several operations:
103
104- Memory allocation and freeing
105- Command execution
106- Aperture management at command execution time
107
108Buffer object allocation is relatively straightforward and largely
109provided by Linux's shmem layer, which provides memory to back each
110object.
111
112Device-specific operations, such as command execution, pinning, buffer
113read & write, mapping, and domain ownership transfers are left to
114driver-specific ioctls.
115
116GEM Initialization
117~~~~~~~~~~~~~~~~~~
118
119Drivers that use GEM must set the DRIVER_GEM bit in the struct
120:c:type:`struct drm_driver <drm_driver>` driver_features
121field. The DRM core will then automatically initialize the GEM core
122before calling the load operation. Behind the scenes, this will create a
123DRM Memory Manager object which provides an address space pool for
124object allocation.
125
126In a KMS configuration, drivers need to allocate and initialize a
127command ring buffer following core GEM initialization if required by the
128hardware. UMA devices usually have what is called a "stolen" memory
129region, which provides space for the initial framebuffer and large,
130contiguous memory regions required by the device. This space is
131typically not managed by GEM, and must be initialized separately into
132its own DRM MM object.
133
134GEM Objects Creation
135~~~~~~~~~~~~~~~~~~~~
136
137GEM splits creation of GEM objects and allocation of the memory that
138backs them in two distinct operations.
139
140GEM objects are represented by an instance of struct :c:type:`struct
141drm_gem_object <drm_gem_object>`. Drivers usually need to
142extend GEM objects with private information and thus create a
143driver-specific GEM object structure type that embeds an instance of
144struct :c:type:`struct drm_gem_object <drm_gem_object>`.
145
146To create a GEM object, a driver allocates memory for an instance of its
147specific GEM object type and initializes the embedded struct
148:c:type:`struct drm_gem_object <drm_gem_object>` with a call
149to :c:func:`drm_gem_object_init()`. The function takes a pointer
150to the DRM device, a pointer to the GEM object and the buffer object
151size in bytes.
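
A sketch of this pattern for a hypothetical foo driver:

::

    struct foo_gem_object {
        struct drm_gem_object base;
        /* driver-private state, e.g. page or DMA bookkeeping */
    };

    static struct foo_gem_object *foo_gem_create(struct drm_device *dev,
                                                 size_t size)
    {
        struct foo_gem_object *obj;
        int ret;

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
            return ERR_PTR(-ENOMEM);

        ret = drm_gem_object_init(dev, &obj->base, PAGE_ALIGN(size));
        if (ret) {
            kfree(obj);
            return ERR_PTR(ret);
        }

        return obj;
    }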
152
153GEM uses shmem to allocate anonymous pageable memory.
154:c:func:`drm_gem_object_init()` will create a shmfs file of the
155requested size and store it into the struct :c:type:`struct
156drm_gem_object <drm_gem_object>` filp field. The memory is
157used as either main storage for the object when the graphics hardware
158uses system memory directly or as a backing store otherwise.
159
160Drivers are responsible for the actual physical pages allocation by
161calling :c:func:`shmem_read_mapping_page_gfp()` for each page.
162Note that they can decide to allocate pages when initializing the GEM
163object, or to delay allocation until the memory is needed (for instance
164when a page fault occurs as a result of a userspace memory access or
165when the driver needs to start a DMA transfer involving the memory).
166
167Anonymous pageable memory allocation is not always desired, for instance
168when the hardware requires physically contiguous system memory as is
169often the case in embedded devices. Drivers can create GEM objects with
170no shmfs backing (called private GEM objects) by initializing them with
171a call to :c:func:`drm_gem_private_object_init()` instead of
172:c:func:`drm_gem_object_init()`. Storage for private GEM objects
173must be managed by drivers.
174
175GEM Objects Lifetime
176~~~~~~~~~~~~~~~~~~~~
177
178All GEM objects are reference-counted by the GEM core. References can be
179acquired and released by calling
180:c:func:`drm_gem_object_reference()` and
181:c:func:`drm_gem_object_unreference()` respectively. The caller
182must hold the :c:type:`struct drm_device <drm_device>`
183struct_mutex lock when calling
184:c:func:`drm_gem_object_reference()`. As a convenience, GEM
185provides the :c:func:`drm_gem_object_unreference_unlocked()`
186function, which can be called without holding the lock.
187
188When the last reference to a GEM object is released the GEM core calls
189the :c:type:`struct drm_driver <drm_driver>` gem_free_object
190operation. That operation is mandatory for GEM-enabled drivers and must
191free the GEM object and all associated resources.
192
193::

    void (*gem_free_object) (struct drm_gem_object *obj);

194Drivers are responsible for freeing all GEM object resources. This
195includes the resources created by the GEM core, which need to be
196released with :c:func:`drm_gem_object_release()`.
197
198GEM Objects Naming
199~~~~~~~~~~~~~~~~~~
200
201Communication between userspace and the kernel refers to GEM objects
202using local handles, global names or, more recently, file descriptors.
203All of those are 32-bit integer values; the usual Linux kernel limits
204apply to the file descriptors.
205
206GEM handles are local to a DRM file. Applications get a handle to a GEM
207object through a driver-specific ioctl, and can use that handle to refer
208to the GEM object in other standard or driver-specific ioctls. Closing a
209DRM file handle frees all its GEM handles and dereferences the
210associated GEM objects.
211
212To create a handle for a GEM object drivers call
213:c:func:`drm_gem_handle_create()`. The function takes a pointer
214to the DRM file and the GEM object and returns a locally unique handle.
215When the handle is no longer needed drivers delete it with a call to
216:c:func:`drm_gem_handle_delete()`. Finally the GEM object
217associated with a handle can be retrieved by a call to
218:c:func:`drm_gem_object_lookup()`.
219
220Handles don't take ownership of GEM objects, they only take a reference
221to the object that will be dropped when the handle is destroyed. To
222avoid leaking GEM objects, drivers must make sure they drop the
223reference(s) they own (such as the initial reference taken at object
224creation time) as appropriate, without any special consideration for the
225handle. For example, in the particular case of combined GEM object and
226handle creation in the implementation of the dumb_create operation,
227drivers must drop the initial reference to the GEM object before
228returning the handle.
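
For example, the end of a dumb_create implementation typically looks like
this sketch (obj is the newly created driver GEM object, args the ioctl
argument structure):

::

    ret = drm_gem_handle_create(file_priv, &obj->base, &args->handle);
    /* drop the initial reference; the handle now holds its own */
    drm_gem_object_unreference_unlocked(&obj->base);

    return ret;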
229
230GEM names are similar in purpose to handles but are not local to DRM
231files. They can be passed between processes to reference a GEM object
232globally. Names can't be used directly to refer to objects in the DRM
233API, applications must convert handles to names and names to handles
234using the DRM_IOCTL_GEM_FLINK and DRM_IOCTL_GEM_OPEN ioctls
235respectively. The conversion is handled by the DRM core without any
236driver-specific support.
237
238GEM also supports buffer sharing with dma-buf file descriptors through
239PRIME. GEM-based drivers must use the provided helper functions to
240implement the exporting and importing correctly. See ?. Since sharing
241file descriptors is inherently more secure than the easily guessable and
242global GEM names it is the preferred buffer sharing mechanism. Sharing
243buffers through GEM names is only supported for legacy userspace.
244Furthermore PRIME also allows cross-device buffer sharing since it is
245based on dma-bufs.
246
247GEM Objects Mapping
248~~~~~~~~~~~~~~~~~~~
249
250Because mapping operations are fairly heavyweight, GEM favours
251read/write-like access to buffers, implemented through driver-specific
252ioctls, over mapping buffers to userspace. However, when random access
253to the buffer is needed (to perform software rendering for instance),
254direct access to the object can be more efficient.
255
256The mmap system call can't be used directly to map GEM objects, as they
257don't have their own file handle. Two alternative methods currently
258co-exist to map GEM objects to userspace. The first method uses a
259driver-specific ioctl to perform the mapping operation, calling
260:c:func:`do_mmap()` under the hood. This is often considered
261dubious, seems to be discouraged for new GEM-enabled drivers, and will
262thus not be described here.
263
264The second method uses the mmap system call on the DRM file handle.

::

    void *mmap(void *addr, size_t length, int prot, int flags,
               int fd, off_t offset);

266DRM identifies the GEM object to be mapped by a fake offset
267passed through the mmap offset argument. Prior to being mapped, a GEM
268object must thus be associated with a fake offset. To do so, drivers
269must call :c:func:`drm_gem_create_mmap_offset()` on the object.
270
271Once allocated, the fake offset value must be passed to the application
272in a driver-specific way and can then be used as the mmap offset
273argument.
274
275The GEM core provides a helper method :c:func:`drm_gem_mmap()` to
276handle object mapping. The method can be set directly as the mmap file
277operation handler. It will look up the GEM object based on the offset
278value and set the VMA operations to the :c:type:`struct drm_driver
279<drm_driver>` gem_vm_ops field. Note that
280:c:func:`drm_gem_mmap()` doesn't map memory to userspace, but
281relies on the driver-provided fault handler to map pages individually.
282
283To use :c:func:`drm_gem_mmap()`, drivers must fill the struct
284:c:type:`struct drm_driver <drm_driver>` gem_vm_ops field
285with a pointer to VM operations.
286
287::

    struct vm_operations_struct *gem_vm_ops;

    struct vm_operations_struct {
        void (*open)(struct vm_area_struct * area);
        void (*close)(struct vm_area_struct * area);
        int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
    };
291
292The open and close operations must update the GEM object reference
293count. Drivers can use the :c:func:`drm_gem_vm_open()` and
294:c:func:`drm_gem_vm_close()` helper functions directly as open
295and close handlers.
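
A sketch of a typical setup, with a hypothetical driver-specific fault
handler and the two helpers reused directly; the resulting table is then
assigned to the gem_vm_ops field of :c:type:`struct drm_driver
<drm_driver>`:

::

    static const struct vm_operations_struct foo_gem_vm_ops = {
        .fault = foo_gem_fault,
        .open = drm_gem_vm_open,
        .close = drm_gem_vm_close,
    };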
296
297The fault operation handler is responsible for mapping individual pages
298to userspace when a page fault occurs. Depending on the memory
299allocation scheme, drivers can allocate pages at fault time, or can
300decide to allocate memory for the GEM object at the time the object is
301created.
302
303Drivers that want to map the GEM object upfront instead of handling page
304faults can implement their own mmap file operation handler.
305
306Memory Coherency
307~~~~~~~~~~~~~~~~
308
309When mapped to the device or used in a command buffer, backing pages for
310an object are flushed to memory and marked write combined so as to be
311coherent with the GPU. Likewise, if the CPU accesses an object after the
312GPU has finished rendering to the object, then the object must be made
313coherent with the CPU's view of memory, usually involving GPU cache
314flushing of various kinds. This core CPU<->GPU coherency management is
315provided by a device-specific ioctl, which evaluates an object's current
316domain and performs any necessary flushing or synchronization to put the
317object into the desired coherency domain (note that the object may be
318busy, i.e. an active render target; in that case, setting the domain
319blocks the client and waits for rendering to complete before performing
320any necessary flushing operations).
321
322Command Execution
323~~~~~~~~~~~~~~~~~
324
325Perhaps the most important GEM function for GPU devices is providing a
326command execution interface to clients. Client programs construct
327command buffers containing references to previously allocated memory
328objects, and then submit them to GEM. At that point, GEM takes care to
329bind all the objects into the GTT, execute the buffer, and provide
330necessary synchronization between clients accessing the same buffers.
331This often involves evicting some objects from the GTT and re-binding
332others (a fairly expensive operation), and providing relocation support
333which hides fixed GTT offsets from clients. Clients must take care not
334to submit command buffers that reference more objects than can fit in
335the GTT; otherwise, GEM will reject them and no rendering will occur.
336Similarly, if several objects in the buffer require fence registers to
337be allocated for correct rendering (e.g. 2D blits on pre-965 chips),
338care must be taken not to require more fence registers than are
339available to the client. Such resource management should be abstracted
340from the client in libdrm.
341
342GEM Function Reference
343----------------------
344
345.. kernel-doc:: drivers/gpu/drm/drm_gem.c
346 :export:
347
348.. kernel-doc:: include/drm/drm_gem.h
349 :internal:
350
351VMA Offset Manager
352------------------
353
354.. kernel-doc:: drivers/gpu/drm/drm_vma_manager.c
355 :doc: vma offset manager
356
357.. kernel-doc:: drivers/gpu/drm/drm_vma_manager.c
358 :export:
359
360.. kernel-doc:: include/drm/drm_vma_manager.h
361 :internal:
362
363PRIME Buffer Sharing
364--------------------
365
366PRIME is the cross-device buffer sharing framework in drm, originally
367created for the OPTIMUS range of multi-GPU platforms. To userspace,
368PRIME buffers are dma-buf based file descriptors.
369
370Overview and Driver Interface
371~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
372
373Similar to GEM global names, PRIME file descriptors are also used to
374share buffer objects across processes. They offer additional security:
375as file descriptors must be explicitly sent over UNIX domain sockets to
376be shared between applications, they can't be guessed like the globally
377unique GEM names.
378
379Drivers that support the PRIME API must set the DRIVER_PRIME bit in the
380struct :c:type:`struct drm_driver <drm_driver>`
381driver_features field, and implement the prime_handle_to_fd and
382prime_fd_to_handle operations.
383
::

    int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv,
                              uint32_t handle, uint32_t flags, int *prime_fd);
    int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv,
                              int prime_fd, uint32_t *handle);

Those two operations convert a handle to a PRIME file descriptor and
vice versa. Drivers must use the kernel dma-buf buffer sharing framework
to manage the PRIME file descriptors. Similar to the mode setting API,
PRIME is agnostic to the underlying buffer object manager, as long as
handles are 32bit unsigned integers.
393
394While non-GEM drivers must implement the operations themselves, GEM
395drivers must use the :c:func:`drm_gem_prime_handle_to_fd()` and
396:c:func:`drm_gem_prime_fd_to_handle()` helper functions. Those
397helpers rely on the driver gem_prime_export and gem_prime_import
398operations to create a dma-buf instance from a GEM object (dma-buf
399exporter role) and to create a GEM object from a dma-buf instance
400(dma-buf importer role).
401
::

    struct dma_buf * (*gem_prime_export)(struct drm_device *dev,
                                         struct drm_gem_object *obj, int flags);
    struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
                                                struct dma_buf *dma_buf);

These two operations are mandatory for GEM drivers that support PRIME.
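From the userspace side these driver operations surface through the
libdrm wrappers. A buffer created on one device can be shared with
another roughly as follows (a sketch; device setup and error handling
elided)::

    #include <stdint.h>
    #include <xf86drm.h>

    /* Export a GEM handle from one device and import it on another. */
    static int share_buffer(int exporter_fd, uint32_t handle,
                            int importer_fd, uint32_t *imported_handle)
    {
            int prime_fd;

            /* Wraps the driver's prime_handle_to_fd operation. */
            if (drmPrimeHandleToFD(exporter_fd, handle, DRM_CLOEXEC, &prime_fd))
                    return -1;

            /* Wraps the driver's prime_fd_to_handle operation. */
            return drmPrimeFDToHandle(importer_fd, prime_fd, imported_handle);
    }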
407
408PRIME Helper Functions
409~~~~~~~~~~~~~~~~~~~~~~
410
411.. kernel-doc:: drivers/gpu/drm/drm_prime.c
412 :doc: PRIME Helpers
413
414PRIME Function References
415-------------------------
416
417.. kernel-doc:: drivers/gpu/drm/drm_prime.c
418 :export:
419
420DRM MM Range Allocator
421----------------------
422
423Overview
424~~~~~~~~
425
426.. kernel-doc:: drivers/gpu/drm/drm_mm.c
427 :doc: Overview
428
429LRU Scan/Eviction Support
430~~~~~~~~~~~~~~~~~~~~~~~~~
431
432.. kernel-doc:: drivers/gpu/drm/drm_mm.c
433 :doc: lru scan roaster
434
435DRM MM Range Allocator Function References
436------------------------------------------
437
438.. kernel-doc:: drivers/gpu/drm/drm_mm.c
439 :export:
440
441.. kernel-doc:: include/drm/drm_mm.h
442 :internal:
443
444CMA Helper Functions Reference
445------------------------------
446
447.. kernel-doc:: drivers/gpu/drm/drm_gem_cma_helper.c
448 :doc: cma helpers
449
450.. kernel-doc:: drivers/gpu/drm/drm_gem_cma_helper.c
451 :export:
452
453.. kernel-doc:: include/drm/drm_gem_cma_helper.h
454 :internal:
diff --git a/Documentation/gpu/drm-uapi.rst b/Documentation/gpu/drm-uapi.rst
new file mode 100644
index 000000000000..536bf3eaadd4
--- /dev/null
+++ b/Documentation/gpu/drm-uapi.rst
@@ -0,0 +1,111 @@
1===================
2Userland interfaces
3===================
4
5The DRM core exports several interfaces to applications, generally
6intended to be used through corresponding libdrm wrapper functions. In
7addition, drivers export device-specific interfaces for use by userspace
8drivers & device-aware applications through ioctls and sysfs files.
9
10External interfaces include: memory mapping, context management, DMA
11operations, AGP management, vblank control, fence management, memory
12management, and output management.
13
14Cover generic ioctls and sysfs layout here. We only need high-level
15info, since man pages should cover the rest.
16
17libdrm Device Lookup
18====================
19
20.. kernel-doc:: drivers/gpu/drm/drm_ioctl.c
21 :doc: getunique and setversion story
22
23
24Primary Nodes, DRM Master and Authentication
25============================================
26
27.. kernel-doc:: drivers/gpu/drm/drm_auth.c
28 :doc: master and authentication
29
30.. kernel-doc:: drivers/gpu/drm/drm_auth.c
31 :export:
32
33.. kernel-doc:: include/drm/drm_auth.h
34 :internal:
35
36Render nodes
37============
38
39DRM core provides multiple character-devices for user-space to use.
40Depending on which device is opened, user-space can perform a different
41set of operations (mainly ioctls). The primary node is always created
42and called card<num>. Additionally, a currently unused control node,
43called controlD<num>, is also created. The primary node provides all
44legacy operations and historically was the only interface used by
45userspace. With KMS, the control node was introduced. However, the
46planned KMS control interface has never been written and so the control
47node stays unused to date.
48
49With the increased use of offscreen renderers and GPGPU applications,
50clients no longer require running compositors or graphics servers to
51make use of a GPU. But the DRM API required unprivileged clients to
52authenticate to a DRM-Master prior to getting GPU access. To avoid this
53step and to grant clients GPU access without authenticating, render
54nodes were introduced. Render nodes solely serve render clients, that
55is, no modesetting or privileged ioctls can be issued on render nodes.
56Only non-global rendering commands are allowed. If a driver supports
57render nodes, it must advertise it via the DRIVER_RENDER DRM driver
58capability. If not supported, the primary node must be used for render
59clients together with the legacy drmAuth authentication procedure.
60
61If a driver advertises render node support, DRM core will create a
62separate render node called renderD<num>. There will be one render node
63per device. No ioctls except PRIME-related ioctls will be allowed on
64this node. Especially GEM_OPEN will be explicitly prohibited. Render
65nodes are designed to avoid the buffer leaks that occur if clients
66guess the flink names or mmap offsets on the legacy interface.
67In addition to this basic interface, drivers must mark their
68driver-dependent render-only ioctls as DRM_RENDER_ALLOW so render
69clients can use them. Driver authors must be careful not to allow any
70privileged ioctls on render nodes.
71
72With render nodes, user-space can now control access to the render node
73via basic file-system access-modes. A running graphics server which
74authenticates clients on the privileged primary/legacy node is no longer
75required. Instead, a client can open the render node and is immediately
76granted GPU access. Communication between clients (or servers) is done
77via PRIME. FLINK from render node to legacy node is not supported. New
78clients must not use the insecure FLINK interface.
79
80Besides dropping all modeset/global ioctls, render nodes also drop the
81DRM-Master concept. There is no reason to associate render clients with
82a DRM-Master as they are independent of any graphics server. Besides,
83they must work without any running master, anyway. Drivers must be able
84to run without a master object if they support render nodes. If, on the
85other hand, a driver requires shared state between clients which is
86visible to user-space and accessible beyond open-file boundaries, it
87cannot support render nodes.
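In practice a render client therefore needs nothing more than
file-system permission on the node itself (minimal sketch; the minor
number renderD128 is system-specific)::

    #include <fcntl.h>

    /* Open a render node directly; no DRM-Master authentication is
     * required, file-system permissions are the only access control. */
    static int open_render_node(void)
    {
            return open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
    }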
88
89VBlank event handling
90=====================
91
92The DRM core exposes two vertical blank related ioctls:
93
94DRM_IOCTL_WAIT_VBLANK
95 This takes a struct drm_wait_vblank structure as its argument, and
96 it is used to block or request a signal when a specified vblank
97 event occurs.
98
99DRM_IOCTL_MODESET_CTL
100 This was only used for user-mode-setting drivers around modesetting
101 changes to allow the kernel to update the vblank interrupt after
102 mode setting, since on many devices the vertical blank counter is
103 reset to 0 at some point during modeset. Modern drivers should not
104 call this any more since with kernel mode setting it is a no-op.
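For example, blocking until the next vblank on the first CRTC could look
like the following sketch (high-CRTC selection bits and error handling
omitted)::

    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/drm.h>

    /* Block until the next vertical blank event on CRTC 0. */
    static int wait_next_vblank(int drm_fd)
    {
            union drm_wait_vblank vbl;

            memset(&vbl, 0, sizeof(vbl));
            vbl.request.type = _DRM_VBLANK_RELATIVE;
            vbl.request.sequence = 1;       /* the next vblank from now */

            return ioctl(drm_fd, DRM_IOCTL_WAIT_VBLANK, &vbl);
    }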
105
106This second part of the GPU Driver Developer's Guide documents driver
107code, implementation details and also all the driver-specific userspace
108interfaces. Especially since all hardware-acceleration interfaces to
109userspace are driver specific for efficiency and other reasons, these
110interfaces can be rather substantial. Hence every driver has its own
111chapter.
diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst
new file mode 100644
index 000000000000..2fe5952e90f1
--- /dev/null
+++ b/Documentation/gpu/i915.rst
@@ -0,0 +1,347 @@
1===========================
2 drm/i915 Intel GFX Driver
3===========================
4
5The drm/i915 driver supports all (with the exception of some very early
6models) integrated GFX chipsets with both Intel display and rendering
7blocks. This excludes a set of SoC platforms with an SGX rendering unit;
8those have basic support through the gma500 drm driver.
9
10Core Driver Infrastructure
11==========================
12
13This section covers core driver infrastructure used by both the display
14and the GEM parts of the driver.
15
16Runtime Power Management
17------------------------
18
19.. kernel-doc:: drivers/gpu/drm/i915/intel_runtime_pm.c
20 :doc: runtime pm
21
22.. kernel-doc:: drivers/gpu/drm/i915/intel_runtime_pm.c
23 :internal:
24
25.. kernel-doc:: drivers/gpu/drm/i915/intel_uncore.c
26 :internal:
27
28Interrupt Handling
29------------------
30
31.. kernel-doc:: drivers/gpu/drm/i915/i915_irq.c
32 :doc: interrupt handling
33
34.. kernel-doc:: drivers/gpu/drm/i915/i915_irq.c
35 :functions: intel_irq_init intel_irq_init_hw intel_hpd_init
36
37.. kernel-doc:: drivers/gpu/drm/i915/i915_irq.c
38 :functions: intel_runtime_pm_disable_interrupts
39
40.. kernel-doc:: drivers/gpu/drm/i915/i915_irq.c
41 :functions: intel_runtime_pm_enable_interrupts
42
43Intel GVT-g Guest Support (vGPU)
44--------------------------------
45
46.. kernel-doc:: drivers/gpu/drm/i915/i915_vgpu.c
47 :doc: Intel GVT-g guest support
48
49.. kernel-doc:: drivers/gpu/drm/i915/i915_vgpu.c
50 :internal:
51
52Display Hardware Handling
53=========================
54
55This section covers everything related to the display hardware including
56the mode setting infrastructure, plane, sprite and cursor handling and
57display output probing and related topics.
58
59Mode Setting Infrastructure
60---------------------------
61
62The i915 driver is thus far the only DRM driver which doesn't use the
63common DRM helper code to implement mode setting sequences. Thus it has
64its own tailor-made infrastructure for executing a display configuration
65change.
66
67Frontbuffer Tracking
68--------------------
69
70.. kernel-doc:: drivers/gpu/drm/i915/intel_frontbuffer.c
71 :doc: frontbuffer tracking
72
73.. kernel-doc:: drivers/gpu/drm/i915/intel_frontbuffer.c
74 :internal:
75
76.. kernel-doc:: drivers/gpu/drm/i915/i915_gem.c
77 :functions: i915_gem_track_fb
78
79Display FIFO Underrun Reporting
80-------------------------------
81
82.. kernel-doc:: drivers/gpu/drm/i915/intel_fifo_underrun.c
83 :doc: fifo underrun handling
84
85.. kernel-doc:: drivers/gpu/drm/i915/intel_fifo_underrun.c
86 :internal:
87
88Plane Configuration
89-------------------
90
91This section covers plane configuration and composition with the primary
92plane, sprites, cursors and overlays. This includes the infrastructure
93to do atomic vsync'ed updates of all this state and also tightly coupled
94topics like watermark setup and computation, framebuffer compression and
95panel self refresh.
96
97Atomic Plane Helpers
98--------------------
99
100.. kernel-doc:: drivers/gpu/drm/i915/intel_atomic_plane.c
101 :doc: atomic plane helpers
102
103.. kernel-doc:: drivers/gpu/drm/i915/intel_atomic_plane.c
104 :internal:
105
106Output Probing
107--------------
108
109This section covers output probing and related infrastructure like the
110hotplug interrupt storm detection and mitigation code. Note that the
111i915 driver still uses most of the common DRM helper code for output
112probing, so those sections fully apply.
113
114Hotplug
115-------
116
117.. kernel-doc:: drivers/gpu/drm/i915/intel_hotplug.c
118 :doc: Hotplug
119
120.. kernel-doc:: drivers/gpu/drm/i915/intel_hotplug.c
121 :internal:
122
123High Definition Audio
124---------------------
125
126.. kernel-doc:: drivers/gpu/drm/i915/intel_audio.c
127 :doc: High Definition Audio over HDMI and Display Port
128
129.. kernel-doc:: drivers/gpu/drm/i915/intel_audio.c
130 :internal:
131
132.. kernel-doc:: include/drm/i915_component.h
133 :internal:
134
135Panel Self Refresh PSR (PSR/SRD)
136--------------------------------
137
138.. kernel-doc:: drivers/gpu/drm/i915/intel_psr.c
139 :doc: Panel Self Refresh (PSR/SRD)
140
141.. kernel-doc:: drivers/gpu/drm/i915/intel_psr.c
142 :internal:
143
144Frame Buffer Compression (FBC)
145------------------------------
146
147.. kernel-doc:: drivers/gpu/drm/i915/intel_fbc.c
148 :doc: Frame Buffer Compression (FBC)
149
150.. kernel-doc:: drivers/gpu/drm/i915/intel_fbc.c
151 :internal:
152
153Display Refresh Rate Switching (DRRS)
154-------------------------------------
155
156.. kernel-doc:: drivers/gpu/drm/i915/intel_dp.c
157 :doc: Display Refresh Rate Switching (DRRS)
158
159.. kernel-doc:: drivers/gpu/drm/i915/intel_dp.c
160 :functions: intel_dp_set_drrs_state
161
162.. kernel-doc:: drivers/gpu/drm/i915/intel_dp.c
163 :functions: intel_edp_drrs_enable
164
165.. kernel-doc:: drivers/gpu/drm/i915/intel_dp.c
166 :functions: intel_edp_drrs_disable
167
168.. kernel-doc:: drivers/gpu/drm/i915/intel_dp.c
169 :functions: intel_edp_drrs_invalidate
170
171.. kernel-doc:: drivers/gpu/drm/i915/intel_dp.c
172 :functions: intel_edp_drrs_flush
173
174.. kernel-doc:: drivers/gpu/drm/i915/intel_dp.c
175 :functions: intel_dp_drrs_init
176
177DPIO
178----
179
180.. kernel-doc:: drivers/gpu/drm/i915/i915_reg.h
181 :doc: DPIO
182
183CSR firmware support for DMC
184----------------------------
185
186.. kernel-doc:: drivers/gpu/drm/i915/intel_csr.c
187 :doc: csr support for dmc
188
189.. kernel-doc:: drivers/gpu/drm/i915/intel_csr.c
190 :internal:
191
192Video BIOS Table (VBT)
193----------------------
194
195.. kernel-doc:: drivers/gpu/drm/i915/intel_bios.c
196 :doc: Video BIOS Table (VBT)
197
198.. kernel-doc:: drivers/gpu/drm/i915/intel_bios.c
199 :internal:
200
201.. kernel-doc:: drivers/gpu/drm/i915/intel_vbt_defs.h
202 :internal:
203
204Memory Management and Command Submission
205========================================
206
207This section covers all things related to the GEM implementation in the
208i915 driver.
209
210Batchbuffer Parsing
211-------------------
212
213.. kernel-doc:: drivers/gpu/drm/i915/i915_cmd_parser.c
214 :doc: batch buffer command parser
215
216.. kernel-doc:: drivers/gpu/drm/i915/i915_cmd_parser.c
217 :internal:
218
219Batchbuffer Pools
220-----------------
221
222.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_batch_pool.c
223 :doc: batch pool
224
225.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_batch_pool.c
226 :internal:
227
228Logical Rings, Logical Ring Contexts and Execlists
229--------------------------------------------------
230
231.. kernel-doc:: drivers/gpu/drm/i915/intel_lrc.c
232 :doc: Logical Rings, Logical Ring Contexts and Execlists
233
234.. kernel-doc:: drivers/gpu/drm/i915/intel_lrc.c
235 :internal:
236
237Global GTT views
238----------------
239
240.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_gtt.c
241 :doc: Global GTT views
242
243.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_gtt.c
244 :internal:
245
246GTT Fences and Swizzling
247------------------------
248
249.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_fence.c
250 :internal:
251
252Global GTT Fence Handling
253~~~~~~~~~~~~~~~~~~~~~~~~~
254
255.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_fence.c
256 :doc: fence register handling
257
258Hardware Tiling and Swizzling Details
259~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
260
261.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_fence.c
262 :doc: tiling swizzling details
263
264Object Tiling IOCTLs
265--------------------
266
267.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_tiling.c
268 :internal:
269
270.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_tiling.c
271 :doc: buffer object tiling
272
273Buffer Object Eviction
274----------------------
275
276This section documents the interface functions for evicting buffer
277objects to make space available in the virtual gpu address spaces. Note
278that this is mostly orthogonal to shrinking buffer object caches, which
279has the goal of making main memory (shared with the gpu through the
280unified memory architecture) available.
281
282.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_evict.c
283 :internal:
284
285Buffer Object Memory Shrinking
286------------------------------
287
288This section documents the interface function for shrinking memory usage
289of buffer object caches. Shrinking is used to make main memory
290available. Note that this is mostly orthogonal to evicting buffer
291objects, which has the goal of making space in gpu virtual address spaces.
292
293.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_shrinker.c
294 :internal:
295
296GuC
297===
298
299GuC-specific firmware loader
300----------------------------
301
302.. kernel-doc:: drivers/gpu/drm/i915/intel_guc_loader.c
303 :doc: GuC-specific firmware loader
304
305.. kernel-doc:: drivers/gpu/drm/i915/intel_guc_loader.c
306 :internal:
307
308GuC-based command submission
309----------------------------
310
311.. kernel-doc:: drivers/gpu/drm/i915/i915_guc_submission.c
312 :doc: GuC-based command submission
313
314.. kernel-doc:: drivers/gpu/drm/i915/i915_guc_submission.c
315 :internal:
316
317GuC Firmware Layout
318-------------------
319
320.. kernel-doc:: drivers/gpu/drm/i915/intel_guc_fwif.h
321 :doc: GuC Firmware Layout
322
323Tracing
324=======
325
326This section covers all things related to the tracepoints implemented
327in the i915 driver.
328
329i915_ppgtt_create and i915_ppgtt_release
330----------------------------------------
331
332.. kernel-doc:: drivers/gpu/drm/i915/i915_trace.h
333 :doc: i915_ppgtt_create and i915_ppgtt_release tracepoints
334
335i915_context_create and i915_context_free
336-----------------------------------------
337
338.. kernel-doc:: drivers/gpu/drm/i915/i915_trace.h
339 :doc: i915_context_create and i915_context_free tracepoints
340
341switch_mm
342---------
343
344.. kernel-doc:: drivers/gpu/drm/i915/i915_trace.h
345 :doc: switch_mm tracepoint
346
347.. WARNING: DOCPROC directive not supported: !Cdrivers/gpu/drm/i915/i915_irq.c
diff --git a/Documentation/gpu/index.rst b/Documentation/gpu/index.rst
new file mode 100644
index 000000000000..fcac0fa72056
--- /dev/null
+++ b/Documentation/gpu/index.rst
@@ -0,0 +1,14 @@
1==================================
2Linux GPU Driver Developer's Guide
3==================================
4
5.. toctree::
6
7 introduction
8 drm-internals
9 drm-mm
10 drm-kms
11 drm-kms-helpers
12 drm-uapi
13 i915
14 vga-switcheroo
diff --git a/Documentation/gpu/introduction.rst b/Documentation/gpu/introduction.rst
new file mode 100644
index 000000000000..1903595b5310
--- /dev/null
+++ b/Documentation/gpu/introduction.rst
@@ -0,0 +1,51 @@
1============
2Introduction
3============
4
5The Linux DRM layer contains code intended to support the needs of
6complex graphics devices, usually containing programmable pipelines well
7suited to 3D graphics acceleration. Graphics drivers in the kernel may
8make use of DRM functions to make tasks like memory management,
9interrupt handling and DMA easier, and provide a uniform interface to
10applications.
11
12A note on versions: this guide covers features found in the DRM tree,
13including the TTM memory manager, output configuration and mode setting,
14and the new vblank internals, in addition to all the regular features
15found in current kernels.
16
17[Insert diagram of typical DRM stack here]
18
19Style Guidelines
20================
21
22For consistency this documentation uses American English. Abbreviations
23are written as all-uppercase, for example: DRM, KMS, IOCTL, CRTC, and so
24on. To aid in reading, the documentation makes full use of the markup
25characters kerneldoc provides: @parameter for function parameters,
26@member for structure members, &structure to reference structures and
27function() for functions. These all get automatically hyperlinked if
28kerneldoc for the referenced objects exists. When referencing entries in
29function vtables please use ->vfunc(). Note that kerneldoc does not
30support referencing struct members directly, so please add a reference
31to the vtable struct somewhere in the same paragraph or at least
32section.
33
34Except in special situations (to separate locked from unlocked variants)
35locking requirements for functions aren't documented in the kerneldoc.
36Instead, locking should be checked at runtime using e.g.
37``WARN_ON(!mutex_is_locked(...));``. Since it's much easier to ignore
38documentation than runtime noise, this provides more value. On top of
39that, runtime checks have to be updated when the locking rules change,
40increasing the chances that they're correct. Within the documentation
41the locking rules should be explained in the relevant structures: either
42in the comment for the lock explaining what it protects, or with a note
43on the data fields about which lock protects them, or both.
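A typical runtime check of this kind might look like the following
(hypothetical structure and lock names, for illustration only)::

    static void example_update_state_locked(struct example_dev *edev)
    {
            /* Document-by-checking: complain loudly if the caller
             * did not take the lock that protects ->state. */
            WARN_ON(!mutex_is_locked(&edev->state_lock));

            edev->state++;
    }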
44
45Functions which have a non-\ ``void`` return value should have a section
46called "Returns" explaining the expected return values in different
47cases and their meanings. Currently there's no consensus whether that
48section name should be all upper-case or not, and whether it should end
49in a colon or not. Go with the file-local style. Other common section
50names are "Notes" with information for dangerous or tricky corner cases,
51and "FIXME" where the interface could be cleaned up.
diff --git a/Documentation/gpu/kms-properties.csv b/Documentation/gpu/kms-properties.csv
new file mode 100644
index 000000000000..b6fcaf639c04
--- /dev/null
+++ b/Documentation/gpu/kms-properties.csv
@@ -0,0 +1,128 @@
1Owner Module/Drivers,Group,Property Name,Type,Property Values,Object attached,Description/Restrictions
2DRM,Generic,“rotation”,BITMASK,"{ 0, ""rotate-0"" }, { 1, ""rotate-90"" }, { 2, ""rotate-180"" }, { 3, ""rotate-270"" }, { 4, ""reflect-x"" }, { 5, ""reflect-y"" }","CRTC, Plane",rotate-(degrees) rotates the image by the specified amount in degrees in counter clockwise direction. reflect-x and reflect-y reflect the image along the specified axis prior to rotation
3,,“scaling mode”,ENUM,"{ ""None"", ""Full"", ""Center"", ""Full aspect"" }",Connector,"Supported by: amdgpu, gma500, i915, nouveau and radeon."
4,Connector,“EDID”,BLOB | IMMUTABLE,0,Connector,Contains id of edid blob ptr object.
5,,“DPMS”,ENUM,"{ “On”, “Standby”, “Suspend”, “Off” }",Connector,Contains DPMS operation mode value.
6,,“PATH”,BLOB | IMMUTABLE,0,Connector,Contains topology path to a connector.
7,,“TILE”,BLOB | IMMUTABLE,0,Connector,Contains tiling information for a connector.
8,,“CRTC_ID”,OBJECT,DRM_MODE_OBJECT_CRTC,Connector,CRTC that connector is attached to (atomic)
9,Plane,“type”,ENUM | IMMUTABLE,"{ ""Overlay"", ""Primary"", ""Cursor"" }",Plane,Plane type
10,,“SRC_X”,RANGE,"Min=0, Max=UINT_MAX",Plane,Scanout source x coordinate in 16.16 fixed point (atomic)
11,,“SRC_Y”,RANGE,"Min=0, Max=UINT_MAX",Plane,Scanout source y coordinate in 16.16 fixed point (atomic)
12,,“SRC_W”,RANGE,"Min=0, Max=UINT_MAX",Plane,Scanout source width in 16.16 fixed point (atomic)
13,,“SRC_H”,RANGE,"Min=0, Max=UINT_MAX",Plane,Scanout source height in 16.16 fixed point (atomic)
14,,“CRTC_X”,SIGNED_RANGE,"Min=INT_MIN, Max=INT_MAX",Plane,Scanout CRTC (destination) x coordinate (atomic)
15,,“CRTC_Y”,SIGNED_RANGE,"Min=INT_MIN, Max=INT_MAX",Plane,Scanout CRTC (destination) y coordinate (atomic)
16,,“CRTC_W”,RANGE,"Min=0, Max=UINT_MAX",Plane,Scanout CRTC (destination) width (atomic)
17,,“CRTC_H”,RANGE,"Min=0, Max=UINT_MAX",Plane,Scanout CRTC (destination) height (atomic)
18,,“FB_ID”,OBJECT,DRM_MODE_OBJECT_FB,Plane,Scanout framebuffer (atomic)
19,,“CRTC_ID”,OBJECT,DRM_MODE_OBJECT_CRTC,Plane,CRTC that plane is attached to (atomic)
20,DVI-I,“subconnector”,ENUM,"{ “Unknown”, “DVI-D”, “DVI-A” }",Connector,TBD
21,,“select subconnector”,ENUM,"{ “Automatic”, “DVI-D”, “DVI-A” }",Connector,TBD
22,TV,“subconnector”,ENUM,"{ ""Unknown"", ""Composite"", ""SVIDEO"", ""Component"", ""SCART"" }",Connector,TBD
23,,“select subconnector”,ENUM,"{ ""Automatic"", ""Composite"", ""SVIDEO"", ""Component"", ""SCART"" }",Connector,TBD
24,,“mode”,ENUM,"{ ""NTSC_M"", ""NTSC_J"", ""NTSC_443"", ""PAL_B"" } etc.",Connector,TBD
25,,“left margin”,RANGE,"Min=0, Max=100",Connector,TBD
26,,“right margin”,RANGE,"Min=0, Max=100",Connector,TBD
27,,“top margin”,RANGE,"Min=0, Max=100",Connector,TBD
28,,“bottom margin”,RANGE,"Min=0, Max=100",Connector,TBD
29,,“brightness”,RANGE,"Min=0, Max=100",Connector,TBD
30,,“contrast”,RANGE,"Min=0, Max=100",Connector,TBD
31,,“flicker reduction”,RANGE,"Min=0, Max=100",Connector,TBD
32,,“overscan”,RANGE,"Min=0, Max=100",Connector,TBD
33,,“saturation”,RANGE,"Min=0, Max=100",Connector,TBD
34,,“hue”,RANGE,"Min=0, Max=100",Connector,TBD
35,Virtual GPU,“suggested X”,RANGE,"Min=0, Max=0xffffffff",Connector,property to suggest an X offset for a connector
36,,“suggested Y”,RANGE,"Min=0, Max=0xffffffff",Connector,property to suggest a Y offset for a connector
37,Optional,"""aspect ratio""",ENUM,"{ ""None"", ""4:3"", ""16:9"" }",Connector,TBD
38,,“dirty”,ENUM | IMMUTABLE,"{ ""Off"", ""On"", ""Annotate"" }",Connector,TBD
39,,“DEGAMMA_LUT”,BLOB,0,CRTC,DRM property to set the degamma lookup table (LUT) mapping pixel data from the framebuffer before it is given to the transformation matrix. The data is interpreted as an array of struct drm_color_lut elements. Hardware might choose not to use the full precision of the LUT elements nor use all the elements of the LUT (for example the hardware might choose to interpolate between LUT[0] and LUT[4]).
40,,“DEGAMMA_LUT_SIZE”,RANGE | IMMUTABLE,"Min=0, Max=UINT_MAX",CRTC,DRM property that gives the size of the lookup table to be set on the DEGAMMA_LUT property (the size depends on the underlying hardware).
41,,“CTM”,BLOB,0,CRTC,DRM property to set the current transformation matrix (CTM) applied to pixel data after the lookup through the degamma LUT and before the lookup through the gamma LUT. The data is interpreted as a struct drm_color_ctm.
42,,“GAMMA_LUT”,BLOB,0,CRTC,DRM property to set the gamma lookup table (LUT) mapping pixel data after the transformation matrix to data sent to the connector. The data is interpreted as an array of struct drm_color_lut elements. Hardware might choose not to use the full precision of the LUT elements nor use all the elements of the LUT (for example the hardware might choose to interpolate between LUT[0] and LUT[4]).
43,,“GAMMA_LUT_SIZE”,RANGE | IMMUTABLE,"Min=0, Max=UINT_MAX",CRTC,DRM property that gives the size of the lookup table to be set on the GAMMA_LUT property (the size depends on the underlying hardware).
44i915,Generic,"""Broadcast RGB""",ENUM,"{ ""Automatic"", ""Full"", ""Limited 16:235"" }",Connector,"When this property is set to Limited 16:235 and CTM is set, the hardware will be programmed with the result of the multiplication of CTM by the limited range matrix to ensure the pixels normally in the range 0..1.0 are remapped to the range 16/255..235/255."
45,,“audio”,ENUM,"{ ""force-dvi"", ""off"", ""auto"", ""on"" }",Connector,TBD
46,SDVO-TV,“mode”,ENUM,"{ ""NTSC_M"", ""NTSC_J"", ""NTSC_443"", ""PAL_B"" } etc.",Connector,TBD
47,,"""left_margin""",RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
48,,"""right_margin""",RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
49,,"""top_margin""",RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
50,,"""bottom_margin""",RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
51,,“hpos”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
52,,“vpos”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
53,,“contrast”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
54,,“saturation”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
55,,“hue”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
56,,“sharpness”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
57,,“flicker_filter”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
58,,“flicker_filter_adaptive”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
59,,“flicker_filter_2d”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
60,,“tv_chroma_filter”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
61,,“tv_luma_filter”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
62,,“dot_crawl”,RANGE,"Min=0, Max=1",Connector,TBD
63,SDVO-TV/LVDS,“brightness”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
64CDV gma-500,Generic,"""Broadcast RGB""",ENUM,"{ “Full”, “Limited 16:235” }",Connector,TBD
65,,"""Broadcast RGB""",ENUM,"{ “off”, “auto”, “on” }",Connector,TBD
66Poulsbo,Generic,“backlight”,RANGE,"Min=0, Max=100",Connector,TBD
67,SDVO-TV,“mode”,ENUM,"{ ""NTSC_M"", ""NTSC_J"", ""NTSC_443"", ""PAL_B"" } etc.",Connector,TBD
68,,"""left_margin""",RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
69,,"""right_margin""",RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
70,,"""top_margin""",RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
71,,"""bottom_margin""",RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
72,,“hpos”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
73,,“vpos”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
74,,“contrast”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
75,,“saturation”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
76,,“hue”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
77,,“sharpness”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
78,,“flicker_filter”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
79,,“flicker_filter_adaptive”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
80,,“flicker_filter_2d”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
81,,“tv_chroma_filter”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
82,,“tv_luma_filter”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
83,,“dot_crawl”,RANGE,"Min=0, Max=1",Connector,TBD
84,SDVO-TV/LVDS,“brightness”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
85armada,CRTC,"""CSC_YUV""",ENUM,"{ ""Auto"" , ""CCIR601"", ""CCIR709"" }",CRTC,TBD
86,,"""CSC_RGB""",ENUM,"{ ""Auto"", ""Computer system"", ""Studio"" }",CRTC,TBD
87,Overlay,"""colorkey""",RANGE,"Min=0, Max=0xffffff",Plane,TBD
88,,"""colorkey_min""",RANGE,"Min=0, Max=0xffffff",Plane,TBD
89,,"""colorkey_max""",RANGE,"Min=0, Max=0xffffff",Plane,TBD
90,,"""colorkey_val""",RANGE,"Min=0, Max=0xffffff",Plane,TBD
91,,"""colorkey_alpha""",RANGE,"Min=0, Max=0xffffff",Plane,TBD
92,,"""colorkey_mode""",ENUM,"{ ""disabled"", ""Y component"", ""U component"" , ""V component"", ""RGB"", ""R component"", ""G component"", ""B component"" }",Plane,TBD
93,,"""brightness""",RANGE,"Min=0, Max=256 + 255",Plane,TBD
94,,"""contrast""",RANGE,"Min=0, Max=0x7fff",Plane,TBD
95,,"""saturation""",RANGE,"Min=0, Max=0x7fff",Plane,TBD
96exynos,CRTC,“mode”,ENUM,"{ ""normal"", ""blank"" }",CRTC,TBD
97,Overlay,“zpos”,RANGE,"Min=0, Max=MAX_PLANE-1",Plane,TBD
98i2c/ch7006_drv,Generic,“scale”,RANGE,"Min=0, Max=2",Connector,TBD
99,TV,“mode”,ENUM,"{ ""PAL"", ""PAL-M"", ""PAL-N"", ""PAL-Nc"", ""PAL-60"", ""NTSC-M"", ""NTSC-J"" }",Connector,TBD
100nouveau,NV10 Overlay,"""colorkey""",RANGE,"Min=0, Max=0x01ffffff",Plane,TBD
101,,“contrast”,RANGE,"Min=0, Max=8192-1",Plane,TBD
102,,“brightness”,RANGE,"Min=0, Max=1024",Plane,TBD
103,,“hue”,RANGE,"Min=0, Max=359",Plane,TBD
104,,“saturation”,RANGE,"Min=0, Max=8192-1",Plane,TBD
105,,“iturbt_709”,RANGE,"Min=0, Max=1",Plane,TBD
106,Nv04 Overlay,“colorkey”,RANGE,"Min=0, Max=0x01ffffff",Plane,TBD
107,,“brightness”,RANGE,"Min=0, Max=1024",Plane,TBD
108,Display,“dithering mode”,ENUM,"{ ""auto"", ""off"", ""on"" }",Connector,TBD
109,,“dithering depth”,ENUM,"{ ""auto"", ""off"", ""on"", ""static 2x2"", ""dynamic 2x2"", ""temporal"" }",Connector,TBD
110,,“underscan”,ENUM,"{ ""auto"", ""6 bpc"", ""8 bpc"" }",Connector,TBD
111,,“underscan hborder”,RANGE,"Min=0, Max=128",Connector,TBD
112,,“underscan vborder”,RANGE,"Min=0, Max=128",Connector,TBD
113,,“vibrant hue”,RANGE,"Min=0, Max=180",Connector,TBD
114,,“color vibrance”,RANGE,"Min=0, Max=200",Connector,TBD
115omap,Generic,“zorder”,RANGE,"Min=0, Max=3","CRTC, Plane",TBD
116qxl,Generic,“hotplug_mode_update”,RANGE,"Min=0, Max=1",Connector,TBD
117radeon,DVI-I,“coherent”,RANGE,"Min=0, Max=1",Connector,TBD
118,DAC enable load detect,“load detection”,RANGE,"Min=0, Max=1",Connector,TBD
119,TV Standard,"""tv standard""",ENUM,"{ ""ntsc"", ""pal"", ""pal-m"", ""pal-60"", ""ntsc-j"" , ""scart-pal"", ""pal-cn"", ""secam"" }",Connector,TBD
120,legacy TMDS PLL detect,"""tmds_pll""",ENUM,"{ ""driver"", ""bios"" }",-,TBD
121,Underscan,"""underscan""",ENUM,"{ ""off"", ""on"", ""auto"" }",Connector,TBD
122,,"""underscan hborder""",RANGE,"Min=0, Max=128",Connector,TBD
123,,"""underscan vborder""",RANGE,"Min=0, Max=128",Connector,TBD
124,Audio,“audio”,ENUM,"{ ""off"", ""on"", ""auto"" }",Connector,TBD
125,FMT Dithering,“dither”,ENUM,"{ ""off"", ""on"" }",Connector,TBD
126rcar-du,Generic,"""alpha""",RANGE,"Min=0, Max=255",Plane,TBD
127,,"""colorkey""",RANGE,"Min=0, Max=0x01ffffff",Plane,TBD
128,,"""zpos""",RANGE,"Min=1, Max=7",Plane,TBD
diff --git a/Documentation/gpu/vga-switcheroo.rst b/Documentation/gpu/vga-switcheroo.rst
new file mode 100644
index 000000000000..327d930a2229
--- /dev/null
+++ b/Documentation/gpu/vga-switcheroo.rst
@@ -0,0 +1,102 @@
1==============
2VGA Switcheroo
3==============
4
5.. kernel-doc:: drivers/gpu/vga/vga_switcheroo.c
6 :doc: Overview
7
8Modes of Use
9============
10
11Manual switching and manual power control
12-----------------------------------------
13
14.. kernel-doc:: drivers/gpu/vga/vga_switcheroo.c
15 :doc: Manual switching and manual power control
16
17Driver power control
18--------------------
19
20.. kernel-doc:: drivers/gpu/vga/vga_switcheroo.c
21 :doc: Driver power control
22
23API
24===
25
26Public functions
27----------------
28
29.. kernel-doc:: drivers/gpu/vga/vga_switcheroo.c
30 :export:
31
32Public structures
33-----------------
34
35.. kernel-doc:: include/linux/vga_switcheroo.h
36 :functions: vga_switcheroo_handler
37
38.. kernel-doc:: include/linux/vga_switcheroo.h
39 :functions: vga_switcheroo_client_ops
40
41Public constants
42----------------
43
44.. kernel-doc:: include/linux/vga_switcheroo.h
45 :functions: vga_switcheroo_handler_flags_t
46
47.. kernel-doc:: include/linux/vga_switcheroo.h
48 :functions: vga_switcheroo_client_id
49
50.. kernel-doc:: include/linux/vga_switcheroo.h
51 :functions: vga_switcheroo_state
52
53Private structures
54------------------
55
56.. kernel-doc:: drivers/gpu/vga/vga_switcheroo.c
57 :functions: vgasr_priv
58
59.. kernel-doc:: drivers/gpu/vga/vga_switcheroo.c
60 :functions: vga_switcheroo_client
61
62Handlers
63========
64
65apple-gmux Handler
66------------------
67
68.. kernel-doc:: drivers/platform/x86/apple-gmux.c
69 :doc: Overview
70
71.. kernel-doc:: drivers/platform/x86/apple-gmux.c
72 :doc: Interrupt
73
74Graphics mux
75~~~~~~~~~~~~
76
77.. kernel-doc:: drivers/platform/x86/apple-gmux.c
78 :doc: Graphics mux
79
80Power control
81~~~~~~~~~~~~~
82
83.. kernel-doc:: drivers/platform/x86/apple-gmux.c
84 :doc: Power control
85
86Backlight control
87~~~~~~~~~~~~~~~~~
88
89.. kernel-doc:: drivers/platform/x86/apple-gmux.c
90 :doc: Backlight control
91
92Public functions
93~~~~~~~~~~~~~~~~
94
95.. kernel-doc:: include/linux/apple-gmux.h
96 :internal:
97
98.. WARNING: DOCPROC directive not supported: !Cdrivers/gpu/vga/vga_switcheroo.c
99
100.. WARNING: DOCPROC directive not supported: !Cinclude/linux/vga_switcheroo.h
101
102.. WARNING: DOCPROC directive not supported: !Cdrivers/platform/x86/apple-gmux.c
diff --git a/Documentation/index.rst b/Documentation/index.rst
new file mode 100644
index 000000000000..dacc77b43c29
--- /dev/null
+++ b/Documentation/index.rst
@@ -0,0 +1,24 @@
1.. The Linux Kernel documentation master file, created by
2 sphinx-quickstart on Fri Feb 12 13:51:46 2016.
3 You can adapt this file completely to your liking, but it should at least
4 contain the root `toctree` directive.
5
6Welcome to The Linux Kernel's documentation!
7============================================
8
9Nothing for you to see here *yet*. Please move along.
10
11Contents:
12
13.. toctree::
14 :maxdepth: 2
15
16 gpu/index
17
18Indices and tables
19==================
20
21* :ref:`genindex`
22* :ref:`modindex`
23* :ref:`search`
24
diff --git a/Documentation/kdump/gdbmacros.txt b/Documentation/kdump/gdbmacros.txt
index 35f6a982a0d5..220d0a80ca2c 100644
--- a/Documentation/kdump/gdbmacros.txt
+++ b/Documentation/kdump/gdbmacros.txt
@@ -170,21 +170,92 @@ document trapinfo
170 address the kernel panicked. 170 address the kernel panicked.
171end 171end
172 172
173define dump_log_idx
174 set $idx = $arg0
175 if ($argc > 1)
176 set $prev_flags = $arg1
177 else
178 set $prev_flags = 0
179 end
180 set $msg = ((struct printk_log *) (log_buf + $idx))
181 set $prefix = 1
182 set $newline = 1
183 set $log = log_buf + $idx + sizeof(*$msg)
173 184
174define dmesg 185 # prev & LOG_CONT && !(msg->flags & LOG_PREIX)
175 set $i = 0 186 if (($prev_flags & 8) && !($msg->flags & 4))
176 set $end_idx = (log_end - 1) & (log_buf_len - 1) 187 set $prefix = 0
188 end
189
190 # msg->flags & LOG_CONT
191 if ($msg->flags & 8)
192 # (prev & LOG_CONT && !(prev & LOG_NEWLINE))
193 if (($prev_flags & 8) && !($prev_flags & 2))
194 set $prefix = 0
195 end
196 # (!(msg->flags & LOG_NEWLINE))
197 if (!($msg->flags & 2))
198 set $newline = 0
199 end
200 end
201
202 if ($prefix)
203 printf "[%5lu.%06lu] ", $msg->ts_nsec / 1000000000, $msg->ts_nsec % 1000000000
204 end
205 if ($msg->text_len != 0)
206 eval "printf \"%%%d.%ds\", $log", $msg->text_len, $msg->text_len
207 end
208 if ($newline)
209 printf "\n"
210 end
211 if ($msg->dict_len > 0)
212 set $dict = $log + $msg->text_len
213 set $idx = 0
214 set $line = 1
215 while ($idx < $msg->dict_len)
216 if ($line)
217 printf " "
218 set $line = 0
219 end
220 set $c = $dict[$idx]
221 if ($c == '\0')
222 printf "\n"
223 set $line = 1
224 else
225 if ($c < ' ' || $c >= 127 || $c == '\\')
226 printf "\\x%02x", $c
227 else
228 printf "%c", $c
229 end
230 end
231 set $idx = $idx + 1
232 end
233 printf "\n"
234 end
235end
236document dump_log_idx
237 Dump a single log given its index in the log buffer. The first
238 parameter is the index into log_buf, the second is optional and
239 specified the previous log buffer's flags, used for properly
240 formatting continued lines.
241end
177 242
178 while ($i < logged_chars) 243define dmesg
179 set $idx = (log_end - 1 - logged_chars + $i) & (log_buf_len - 1) 244 set $i = log_first_idx
245 set $end_idx = log_first_idx
246 set $prev_flags = 0
180 247
181 if ($idx + 100 <= $end_idx) || \ 248 while (1)
182 ($end_idx <= $idx && $idx + 100 < log_buf_len) 249 set $msg = ((struct printk_log *) (log_buf + $i))
183 printf "%.100s", &log_buf[$idx] 250 if ($msg->len == 0)
184 set $i = $i + 100 251 set $i = 0
185 else 252 else
186 printf "%c", log_buf[$idx] 253 dump_log_idx $i $prev_flags
187 set $i = $i + 1 254 set $i = $i + $msg->len
255 set $prev_flags = $msg->flags
256 end
257 if ($i == $end_idx)
258 loop_break
188 end 259 end
189 end 260 end
190end 261end
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 82b42c958d1c..a2a662d4da83 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -3992,8 +3992,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 
 	trace_event=[event-list]
 			[FTRACE] Set and start specified trace events in order
-			to facilitate early boot debugging.
-			See also Documentation/trace/events.txt
+			to facilitate early boot debugging. The event-list is a
+			comma separated list of trace events to enable. See
+			also Documentation/trace/events.txt
 
 	trace_options=[option-list]
 			[FTRACE] Enable or disable tracer options at boot.
diff --git a/Documentation/leds/leds-class.txt b/Documentation/leds/leds-class.txt
index d406d98339b2..44f5e6bccd97 100644
--- a/Documentation/leds/leds-class.txt
+++ b/Documentation/leds/leds-class.txt
@@ -74,8 +74,8 @@ blink_set() function (see <linux/leds.h>). To set an LED to blinking,
 however, it is better to use the API function led_blink_set(), as it
 will check and implement software fallback if necessary.
 
-To turn off blinking again, use the API function led_brightness_set()
-as that will not just set the LED brightness but also stop any software
+To turn off blinking, use the API function led_brightness_set()
+with brightness value LED_OFF, which should stop any software
 timers that may have been required for blinking.
 
 The blink_set() function should choose a user friendly blinking value
diff --git a/Documentation/mic/mpssd/mpssd.c b/Documentation/mic/mpssd/mpssd.c
index 30fb842a976d..49db1def1721 100644
--- a/Documentation/mic/mpssd/mpssd.c
+++ b/Documentation/mic/mpssd/mpssd.c
@@ -1538,9 +1538,9 @@ set_cmdline(struct mic_info *mic)
 
 	len = snprintf(buffer, PATH_MAX,
 		"clocksource=tsc highres=off nohz=off ");
-	len += snprintf(buffer + len, PATH_MAX,
+	len += snprintf(buffer + len, PATH_MAX - len,
 		"cpufreq_on;corec6_off;pc3_off;pc6_off ");
-	len += snprintf(buffer + len, PATH_MAX,
+	len += snprintf(buffer + len, PATH_MAX - len,
 		"ifcfg=static;address,172.31.%d.1;netmask,255.255.255.0",
 		mic->id + 1);
 
diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt
index 631b0f7ae16f..9d05ed7f7da5 100644
--- a/Documentation/networking/dsa/dsa.txt
+++ b/Documentation/networking/dsa/dsa.txt
@@ -369,8 +369,6 @@ does not allocate any driver private context space.
 Switch configuration
 --------------------
 
-- priv_size: additional size needed by the switch driver for its private context
-
 - tag_protocol: this is to indicate what kind of tagging protocol is supported,
   should be a valid value from the dsa_tag_protocol enum
 
@@ -416,11 +414,6 @@ PHY devices and link management
   to the switch port MDIO registers. If unavailable return a negative error
   code.
 
-- poll_link: Function invoked by DSA to query the link state of the switch
-  builtin Ethernet PHYs, per port. This function is responsible for calling
-  netif_carrier_{on,off} when appropriate, and can be used to poll all ports in a
-  single call. Executes from workqueue context.
-
 - adjust_link: Function invoked by the PHY library when a slave network device
   is attached to a PHY device. This function is responsible for appropriately
   configuring the switch port link parameters: speed, duplex, pause based on
@@ -542,6 +535,16 @@ Bridge layer
 Bridge VLAN filtering
 ---------------------
 
+- port_vlan_filtering: bridge layer function invoked when the bridge gets
+  configured for turning on or off VLAN filtering. If nothing specific needs to
+  be done at the hardware level, this callback does not need to be implemented.
+  When VLAN filtering is turned on, the hardware must be programmed with
+  rejecting 802.1Q frames which have VLAN IDs outside of the programmed allowed
+  VLAN ID map/rules. If there is no PVID programmed into the switch port,
+  untagged frames must be rejected as well. When turned off the switch must
+  accept any 802.1Q frames irrespective of their VLAN ID, and untagged frames
+  are allowed.
+
 - port_vlan_prepare: bridge layer function invoked when the bridge prepares the
   configuration of a VLAN on the given port. If the operation is not supported
   by the hardware, this function should return -EOPNOTSUPP to inform the bridge
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 6c7f365b1515..9ae929395b24 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1036,15 +1036,17 @@ proxy_arp_pvlan - BOOLEAN
 
 shared_media - BOOLEAN
 	Send(router) or accept(host) RFC1620 shared media redirects.
-	Overrides ip_secure_redirects.
+	Overrides secure_redirects.
 	shared_media for the interface will be enabled if at least one of
 	conf/{all,interface}/shared_media is set to TRUE,
 	it will be disabled otherwise
 	default TRUE
 
 secure_redirects - BOOLEAN
-	Accept ICMP redirect messages only for gateways,
-	listed in default gateway list.
+	Accept ICMP redirect messages only to gateways listed in the
+	interface's current gateway list. Even if disabled, RFC1122 redirect
+	rules still apply.
+	Overridden by shared_media.
 	secure_redirects for the interface will be enabled if at least one of
 	conf/{all,interface}/secure_redirects is set to TRUE,
 	it will be disabled otherwise
diff --git a/Documentation/scsi/scsi_eh.txt b/Documentation/scsi/scsi_eh.txt
index 8638f61c8c9d..37eca00796ee 100644
--- a/Documentation/scsi/scsi_eh.txt
+++ b/Documentation/scsi/scsi_eh.txt
@@ -263,19 +263,23 @@ scmd->allowed.
 
  3. scmd recovered
     ACTION: scsi_eh_finish_cmd() is invoked to EH-finish scmd
-	    - shost->host_failed--
 	    - clear scmd->eh_eflags
 	    - scsi_setup_cmd_retry()
 	    - move from local eh_work_q to local eh_done_q
     LOCKING: none
+    CONCURRENCY: at most one thread per separate eh_work_q to
+		 keep queue manipulation lockless
 
  4. EH completes
     ACTION: scsi_eh_flush_done_q() retries scmds or notifies upper
-	    layer of failure.
+	    layer of failure. May be called concurrently but must have
+	    no more than one thread per separate eh_work_q to
+	    manipulate the queue locklessly
 	    - scmd is removed from eh_done_q and scmd->eh_entry is cleared
 	    - if retry is necessary, scmd is requeued using
 	      scsi_queue_insert()
 	    - otherwise, scsi_finish_command() is invoked for scmd
+	    - zero shost->host_failed
     LOCKING: queue or finish function performs appropriate locking
 
 
diff --git a/Documentation/security/keys.txt b/Documentation/security/keys.txt
index 20d05719bceb..3849814bfe6d 100644
--- a/Documentation/security/keys.txt
+++ b/Documentation/security/keys.txt
@@ -826,7 +826,8 @@ The keyctl syscall functions are:
 (*) Compute a Diffie-Hellman shared secret or public key
 
 	long keyctl(KEYCTL_DH_COMPUTE, struct keyctl_dh_params *params,
-		    char *buffer, size_t buflen);
+		    char *buffer, size_t buflen,
+		    void *reserved);
 
     The params struct contains serial numbers for three keys:
 
@@ -843,6 +844,8 @@ The keyctl syscall functions are:
     public key. If the base is the remote public key, the result is
     the shared secret.
 
+    The reserved argument must be set to NULL.
+
     The buffer length must be at least the length of the prime, or zero.
 
     If the buffer length is nonzero, the length of the result is
diff --git a/Documentation/security/self-protection.txt b/Documentation/security/self-protection.txt
index babd6378ec05..3010576c9fca 100644
--- a/Documentation/security/self-protection.txt
+++ b/Documentation/security/self-protection.txt
@@ -183,8 +183,9 @@ provide meaningful defenses.
 ### Canaries, blinding, and other secrets
 
 It should be noted that things like the stack canary discussed earlier
-are technically statistical defenses, since they rely on a (leakable)
-secret value.
+are technically statistical defenses, since they rely on a secret value,
+and such values may become discoverable through an information exposure
+flaw.
 
 Blinding literal values for things like JITs, where the executable
 contents may be partially under the control of userspace, need a similar
@@ -199,8 +200,8 @@ working?) in order to maximize their success.
 Since the location of kernel memory is almost always instrumental in
 mounting a successful attack, making the location non-deterministic
 raises the difficulty of an exploit. (Note that this in turn makes
-the value of leaks higher, since they may be used to discover desired
-memory locations.)
+the value of information exposures higher, since they may be used to
+discover desired memory locations.)
 
 #### Text and module base
 
@@ -222,14 +223,21 @@ become more difficult to locate.
 Much of the kernel's dynamic memory (e.g. kmalloc, vmalloc, etc) ends up
 being relatively deterministic in layout due to the order of early-boot
 initializations. If the base address of these areas is not the same
-between boots, targeting them is frustrated, requiring a leak specific
-to the region.
+between boots, targeting them is frustrated, requiring an information
+exposure specific to the region.
+
+#### Structure layout
+
+By performing a per-build randomization of the layout of sensitive
+structures, attacks must either be tuned to known kernel builds or expose
+enough kernel memory to determine structure layouts before manipulating
+them.
 
 
-## Preventing Leaks
+## Preventing Information Exposures
 
 Since the locations of sensitive structures are the primary target for
-attacks, it is important to defend against leaks of both kernel memory
+attacks, it is important to defend against exposure of both kernel memory
 addresses and kernel memory contents (since they may contain kernel
 addresses or other sensitive things like canary values).
 
@@ -250,8 +258,8 @@ sure structure holes are cleared.
 When releasing memory, it is best to poison the contents (clear stack on
 syscall return, wipe heap memory on a free), to avoid reuse attacks that
 rely on the old contents of memory. This frustrates many uninitialized
-variable attacks, stack info leaks, heap info leaks, and use-after-free
-attacks.
+variable attacks, stack content exposures, heap content exposures, and
+use-after-free attacks.
 
 ### Destination tracking
 
diff --git a/Documentation/sphinx/convert_template.sed b/Documentation/sphinx/convert_template.sed
new file mode 100644
index 000000000000..c1503fcca4ec
--- /dev/null
+++ b/Documentation/sphinx/convert_template.sed
@@ -0,0 +1,18 @@
1#
2# Pandoc doesn't grok <function> or <structname>, so convert them
3# ahead of time.
4#
5# Use the following escapes to pass through pandoc:
6# $bq = "`"
7# $lt = "<"
8# $gt = ">"
9#
10s%<function>\([^<(]\+\)()</function>%:c:func:$bq\1()$bq%g
11s%<function>\([^<(]\+\)</function>%:c:func:$bq\1()$bq%g
12s%<structname>struct *\([^<]\+\)</structname>%:c:type:$bqstruct \1 $lt\1$gt$bq%g
13s%struct <structname>\([^<]\+\)</structname>%:c:type:$bqstruct \1 $lt\1$gt$bq%g
14s%<structname>\([^<]\+\)</structname>%:c:type:$bqstruct \1 $lt\1$gt$bq%g
15#
16# Wrap docproc directives in para and code blocks.
17#
18s%^\(!.*\)$%<para><code>DOCPROC: \1</code></para>%
diff --git a/Documentation/sphinx/kernel-doc.py b/Documentation/sphinx/kernel-doc.py
new file mode 100644
index 000000000000..4adfb0e91ecc
--- /dev/null
+++ b/Documentation/sphinx/kernel-doc.py
@@ -0,0 +1,127 @@
1# coding=utf-8
2#
3# Copyright © 2016 Intel Corporation
4#
5# Permission is hereby granted, free of charge, to any person obtaining a
6# copy of this software and associated documentation files (the "Software"),
7# to deal in the Software without restriction, including without limitation
8# the rights to use, copy, modify, merge, publish, distribute, sublicense,
9# and/or sell copies of the Software, and to permit persons to whom the
10# Software is furnished to do so, subject to the following conditions:
11#
12# The above copyright notice and this permission notice (including the next
13# paragraph) shall be included in all copies or substantial portions of the
14# Software.
15#
16# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22# IN THE SOFTWARE.
23#
24# Authors:
25# Jani Nikula <jani.nikula@intel.com>
26#
27# Please make sure this works on both python2 and python3.
28#
29
30import os
31import subprocess
32import sys
33import re
34
35from docutils import nodes, statemachine
36from docutils.statemachine import ViewList
37from docutils.parsers.rst import directives
38from sphinx.util.compat import Directive
39
40class KernelDocDirective(Directive):
41 """Extract kernel-doc comments from the specified file"""
42 required_arguments = 1
43 optional_arguments = 4
44 option_spec = {
45 'doc': directives.unchanged_required,
46 'functions': directives.unchanged_required,
47 'export': directives.flag,
48 'internal': directives.flag,
49 }
50 has_content = False
51
52 def run(self):
53 env = self.state.document.settings.env
54 cmd = [env.config.kerneldoc_bin, '-rst', '-enable-lineno']
55
56 filename = env.config.kerneldoc_srctree + '/' + self.arguments[0]
57
58 # Tell sphinx of the dependency
59 env.note_dependency(os.path.abspath(filename))
60
61 tab_width = self.options.get('tab-width', self.state.document.settings.tab_width)
62 source = filename
63
64 # FIXME: make this nicer and more robust against errors
65 if 'export' in self.options:
66 cmd += ['-export']
67 elif 'internal' in self.options:
68 cmd += ['-internal']
69 elif 'doc' in self.options:
70 cmd += ['-function', str(self.options.get('doc'))]
71 elif 'functions' in self.options:
72 for f in str(self.options.get('functions')).split(' '):
73 cmd += ['-function', f]
74
75 cmd += [filename]
76
77 try:
78 env.app.verbose('calling kernel-doc \'%s\'' % (" ".join(cmd)))
79
80 p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
81 out, err = p.communicate()
82
83 # python2 needs conversion to unicode.
84 # python3 with universal_newlines=True returns strings.
85 if sys.version_info.major < 3:
86 out, err = unicode(out, 'utf-8'), unicode(err, 'utf-8')
87
88 if p.returncode != 0:
89 sys.stderr.write(err)
90
91 env.app.warn('kernel-doc \'%s\' failed with return code %d' % (" ".join(cmd), p.returncode))
92 return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))]
93 elif env.config.kerneldoc_verbosity > 0:
94 sys.stderr.write(err)
95
96 lines = statemachine.string2lines(out, tab_width, convert_whitespace=True)
97 result = ViewList()
98
99        lineoffset = 0
100 line_regex = re.compile("^#define LINENO ([0-9]+)$")
101 for line in lines:
102 match = line_regex.search(line)
103 if match:
104 # sphinx counts lines from 0
105 lineoffset = int(match.group(1)) - 1
106            # we must eat our comments since they upset the markup
107 else:
108 result.append(line, source, lineoffset)
109 lineoffset += 1
110
111 node = nodes.section()
112 node.document = self.state.document
113 self.state.nested_parse(result, self.content_offset, node)
114
115 return node.children
116
117 except Exception as e:
118 env.app.warn('kernel-doc \'%s\' processing failed with: %s' %
119 (" ".join(cmd), str(e)))
120 return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))]
121
122def setup(app):
123 app.add_config_value('kerneldoc_bin', None, 'env')
124 app.add_config_value('kerneldoc_srctree', None, 'env')
125 app.add_config_value('kerneldoc_verbosity', 1, 'env')
126
127 app.add_directive('kernel-doc', KernelDocDirective)
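For reference, the kind of comment this directive pulls out of a C source file looks roughly like the following (the function itself is a made-up example, not part of this patch):

	/**
	 * example_add() - add two integers (hypothetical example)
	 * @a: first addend
	 * @b: second addend
	 *
	 * Return: the sum of @a and @b.
	 */
	int example_add(int a, int b)
	{
	        return a + b;
	}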
diff --git a/Documentation/sphinx/post_convert.sed b/Documentation/sphinx/post_convert.sed
new file mode 100644
index 000000000000..392770bac53b
--- /dev/null
+++ b/Documentation/sphinx/post_convert.sed
@@ -0,0 +1,23 @@
1#
2# Unescape.
3#
4s/$bq/`/g
5s/$lt/</g
6s/$gt/>/g
7#
8# pandoc escapes "_" even where it does not need to be escaped. Remove
9# the extra backslashes.
10#
11s/\\_/_/g
12#
13# Unwrap docproc directives.
14#
15s/^``DOCPROC: !E\(.*\)``$/.. kernel-doc:: \1\n :export:/
16s/^``DOCPROC: !I\(.*\)``$/.. kernel-doc:: \1\n :internal:/
17s/^``DOCPROC: !F\([^ ]*\) \(.*\)``$/.. kernel-doc:: \1\n :functions: \2/
18s/^``DOCPROC: !P\([^ ]*\) \(.*\)``$/.. kernel-doc:: \1\n :doc: \2/
19s/^``DOCPROC: \(!.*\)``$/.. WARNING: DOCPROC directive not supported: \1/
20#
21# Trim trailing whitespace.
22#
23s/[[:space:]]*$//
diff --git a/Documentation/sphinx/tmplcvt b/Documentation/sphinx/tmplcvt
new file mode 100755
index 000000000000..909a73065e0a
--- /dev/null
+++ b/Documentation/sphinx/tmplcvt
@@ -0,0 +1,19 @@
1#!/bin/bash
2#
3# Convert a template file into something like RST
4#
5# fix <function>
6# feed to pandoc
7# fix \_
8# title line?
9#
10
11in=$1
12rst=$2
13tmp=$rst.tmp
14
15cp "$in" "$tmp"
16sed --in-place -f convert_template.sed "$tmp"
17pandoc -s -S -f docbook -t rst -o "$rst" "$tmp"
18sed --in-place -f post_convert.sed "$rst"
19rm "$tmp"
diff --git a/Documentation/sync_file.txt b/Documentation/sync_file.txt
index eaf8297dbca2..e8e2ebafe5fa 100644
--- a/Documentation/sync_file.txt
+++ b/Documentation/sync_file.txt
@@ -6,8 +6,8 @@
6 6
7This document serves as a guide for device drivers writers on what the 7This document serves as a guide for device drivers writers on what the
8sync_file API is, and how drivers can support it. Sync file is the carrier of 8sync_file API is, and how drivers can support it. Sync file is the carrier of
9the fences(struct fence) that needs to synchronized between drivers or across 9the fences(struct fence) that are needed to synchronize between drivers or
10process boundaries. 10across process boundaries.
11 11
12The sync_file API is meant to be used to send and receive fence information 12The sync_file API is meant to be used to send and receive fence information
13to/from userspace. It enables userspace to do explicit fencing, where instead 13to/from userspace. It enables userspace to do explicit fencing, where instead
@@ -32,7 +32,7 @@ in-fences and out-fences
32Sync files can go either to or from userspace. When a sync_file is sent from 32Sync files can go either to or from userspace. When a sync_file is sent from
33the driver to userspace we call the fences it contains 'out-fences'. They are 33the driver to userspace we call the fences it contains 'out-fences'. They are
34related to a buffer that the driver is processing or is going to process, so 34related to a buffer that the driver is processing or is going to process, so
35the driver an create out-fence to be able to notify, through fence_signal(), 35the driver creates an out-fence to be able to notify, through fence_signal(),
36when it has finished using (or processing) that buffer. Out-fences are fences 36when it has finished using (or processing) that buffer. Out-fences are fences
37that the driver creates. 37that the driver creates.
38 38
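As a rough sketch of that flow (the driver-specific type and helper here are invented; only fence_signal() comes from the fence API described above):

	#include <linux/fence.h>

	/* Called by the driver once the hardware is done with the buffer;
	 * signalling the out-fence wakes every waiter, including waiters in
	 * other processes that received the fence through a sync_file. */
	static void my_job_done(struct my_job *job)
	{
	        fence_signal(job->out_fence);
	}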
diff --git a/Documentation/zh_CN/CodingStyle b/Documentation/zh_CN/CodingStyle
index 654afd72eb24..bbb9d6ae05ca 100644
--- a/Documentation/zh_CN/CodingStyle
+++ b/Documentation/zh_CN/CodingStyle
@@ -24,34 +24,33 @@ Chinese translation of Documentation/CodingStyle
 
 		Linux kernel coding style
 
-This is a short document describing the preferred coding style for the
-linux kernel. Coding style is very personal, and I won't force my views
-on anybody, but this is what goes for the code I have to maintain, and I
-would prefer that most other code followed it too. Please at least
-consider the points made here when writing code.
+This is a short document describing the preferred coding style for the
+linux kernel. Coding style is very personal, and I won't force my own
+views on anybody, but this is what goes for anything that I have to be
+able to maintain, and I'd prefer it for most other things too. Please
+(at least) consider the points made here.
 
 First off, I'd suggest printing out a copy of the GNU coding standards,
 and NOT read it. Burn them, it's a great symbolic gesture.
 
 Anyway, here goes:
 
 
 		Chapter 1: Indentation
 
 Tabs are 8 characters, and thus indentations are also 8 characters.
 There are heretic movements that try to make indentations 4 (or even 2!)
 characters deep, and that is akin to trying to define the value of PI to
 be 3.
 
 Rationale: The whole idea behind indentation is to clearly define where
 a block of control starts and ends. Especially when you've been looking
 at your screen for 20 straight hours, you'll find it a lot easier to see
 how the indentation works if you have large indentations.
 
 Now, some people will claim that having 8-character indentations makes
 the code move too far to the right, and makes it hard to read on an
 80-character terminal screen. The answer to that is that if you need
 more than 3 levels of indentation, you're screwed anyway, and should fix
 your program.
 
 In short, 8-char indents make things easier to read, and have the added
 benefit of warning you when you're nesting your functions too deep.
 Heed that warning.
 
 The preferred way to ease multiple indentation levels in a switch
 statement is to align the "switch" and its subordinate "case" labels in
 the same column instead of "double-indenting" the "case" labels. E.g.:
 
 	switch (suffix) {
 	case 'G':
@@ -70,7 +69,6 @@ Chinese translation of Documentation/CodingStyle
 		break;
 	}
 
-
 Don't put multiple statements on a single line unless you have
 something to hide:
 
 	if (condition) do_this;
@@ -79,7 +77,7 @@ Chinese translation of Documentation/CodingStyle
 Don't put multiple assignments on a single line either. Kernel coding
 style is super simple. Avoid tricky expressions.
 
 Outside of comments, documentation and except in Kconfig, spaces are
 never used for indentation, and the above example is deliberately
 broken.
 
 Get a decent editor and don't leave whitespace at the end of lines.
 
@@ -88,27 +86,18 @@ Chinese translation of Documentation/CodingStyle
 
 Coding style is all about readability and maintainability using
 commonly available tools.
 
 The limit on the length of lines is 80 columns and this is a strongly
 preferred limit.
 
-Statements longer than 80 columns will be broken into sensible chunks.
-Each chunk should be substantially shorter than the original statement
-and placed substantially to the right. The same applies to function
-headers with a long argument list. Long strings are as well broken into
-shorter strings. The only exception to this is where exceeding 80
-columns significantly increases readability and does not hide
-information.
-
-void fun(int a, int b, int c)
-{
-	if (condition)
-		printk(KERN_WARNING "Warning this is a long printk with "
-						"3 parameters a: %u b: %u "
-						"c: %u \n", a, b, c);
-	else
-		next_statement;
-}
+Statements longer than 80 columns will be broken into sensible chunks,
+unless exceeding 80 columns significantly increases readability and
+does not hide information. Descendants are always substantially shorter
+than the parent and are placed substantially to the right. The same
+applies to function headers with a long argument list. However, never
+break user-visible strings such as printk messages, because that breaks
+the ability to grep for them.
 
 		Chapter 3: Placing Braces and Spaces
 
 The other issue that always comes up in C styling is the placement of
 braces. Unlike the indent size, there are few technical reasons to
 choose one placement strategy over the other, but the preferred way, as
 shown to us by Kernighan and Ritchie, is to put the opening brace last
 on the line, and put the closing brace first, thusly:
 
 	if (x is true) {
 		we do y
@@ -134,12 +123,12 @@ The other issue that always comes up in C styling is brace placement
 		body of function
 	}
 
 Heretic people all over the world have claimed that this inconsistency
 is ... well ... inconsistent, but all right-thinking people know that
 (a) K&R are _right_ and (b) K&R are right. Besides, functions are
 special anyway (you can't nest them in C).
 
 Note that the closing brace is empty on a line of its own, except in
 the cases where it is followed by a continuation of the same statement,
 ie a "while" in a do-statement or an "else" in an if-statement, like
 this:
 
 	do {
 		body of do-loop
@@ -158,41 +147,50 @@ (a) K&R are _right_ and (b) K&R are right
 Rationale: K&R.
 
 Also, note that this brace-placement also minimizes the number of empty
 (or almost empty) lines, without any loss of readability. Thus, as the
 supply of new-lines on your screen is not a renewable resource (think
 25-line terminal screens here), you have more empty lines to put
 comments on.
 
 Do not unnecessarily use braces where a single statement will do.
 
-if (condition)
-	action();
+	if (condition)
+		action();
+
+and
+
+	if (condition)
+		do_this();
+	else
+		do_that();
 
-This does not apply if one branch of a conditional statement is a
-single statement. In that case use braces in both branches.
+This does not apply if only one branch of a conditional statement is a
+single statement; in the latter case use braces in both branches:
 
-if (condition) {
-	do_this();
-	do_that();
-} else {
-	otherwise();
-}
+	if (condition) {
+		do_this();
+		do_that();
+	} else {
+		otherwise();
+	}
 
 		3.1: Spaces
 
 Linux kernel style for use of spaces depends (mostly) on
 function-versus-keyword usage. Use a space after (most) keywords. The
 notable exceptions are sizeof, typeof, alignof, and __attribute__,
 which look somewhat like functions (and are usually used with
 parentheses in Linux, although they are not required in the language,
 as in: "sizeof info" after "struct fileinfo info;" is declared).
 
 So use a space after these keywords:
+
 	if, switch, case, for, do, while
-but not with sizeof, typeof, alignof, or __attribute__. E.g.,
+
+but not with sizeof, typeof, alignof, or __attribute__. E.g.,
+
 	s = sizeof(struct file);
 
 Do not add spaces around (inside) parenthesized expressions. This
 example is *bad*:
 
 	s = sizeof( struct file );
 
 When declaring pointer data or a function that returns a pointer type,
 the preferred use of '*' is adjacent to the data name or function name
 and not adjacent to the type name. Examples:
 
 	char *linux_banner;
@@ -204,15 +202,18 @@ Linux kernel style for use of spaces depends (mostly) on
 	=  +  -  <  >  *  /  %  |  &  ^  <=  >=  ==  !=  ?  :
 
 but no space after unary operators:
+
 	&  *  +  -  ~  !  sizeof  typeof  alignof  __attribute__  defined
 
 no space before the postfix increment & decrement unary operators:
+
 	++  --
 
 no space after the prefix increment & decrement unary operators:
+
 	++  --
 
 and no space around the '.' and "->" structure member operators.
 
 Do not leave trailing whitespace at the ends of lines. Some editors
 with "smart" indentation will insert whitespace at the beginning of new
 lines as appropriate, so you can start typing the next line of code
 right away. However, some such editors do not remove the whitespace if
 you end up not putting a line of code there.
@@ -225,23 +226,23 @@ Linux kernel style for use of spaces depends (mostly) on
 
 		Chapter 4: Naming
 
 C is a Spartan language, and so should your naming be. Unlike Modula-2
 and Pascal programmers, C programmers do not use cute names like
 ThisVariableIsATemporaryCounter. A C programmer would call that
 variable "tmp", which is much easier to write, and not the least more
 difficult to understand.
 
 HOWEVER, while mixed-case names are frowned upon, descriptive names for
 global variables are a must. To call a global function "foo" is a
 shooting offense.
 
 GLOBAL variables (to be used only if you _really_ need them) need to
 have descriptive names, as do global functions. If you have a function
 that counts the number of active users, you should call that
 "count_active_users()" or similar, you should _not_ call it
 "cntuser()".
 
 Encoding the type of a function into the name (so-called Hungarian
 notation) is brain damaged - the compiler knows the types anyway and
 can check those, and it only confuses the programmer. No wonder
 Microsoft makes buggy programs.
 
 LOCAL variable names should be short, and to the point. If you have
 some random integer loop counter, it should probably be called "i".
 Calling it "loop_counter" is non-productive, if there is no chance of
 it being misunderstood. Similarly, "tmp" can be just about any type of
 variable that is used to hold a temporary value.
 
 If you are afraid to mix up your local variable names, you have another
 problem, which is called the function-growth-hormone-imbalance
 syndrome. See chapter 6 (Functions).
@@ -249,9 +250,9 @@ C is a Spartan language, and so should your naming be
 
 		Chapter 5: Typedefs
 
 Please don't use things like "vps_t".
 
 It's a _mistake_ to use typedef for structures and pointers. When you
 see a
 
 	vps_t a;
 
@@ -261,91 +262,91 @@ C is a Spartan language, and so should your naming be
 
 	struct virtual_container *a;
 
 and you actually know what "a" is.
 
 Lots of people think that typedefs "help readability". Not so. They are
 useful only for:
 
 (a) totally opaque objects (where the typedef is actively used to
     _hide_ what the object is).
 
     Example: "pte_t" etc. opaque objects that you can only access using
     the proper accessor functions.
 
     NOTE! Opaqueness and "accessor functions" are not good in
     themselves. The reason we have them for things like pte_t etc. is
     that there really is absolutely _zero_ portably accessible
     information there.
 
 (b) Clear integer types, where the abstraction _helps_ avoid confusion
     whether it is "int" or "long".
 
     u8/u16/u32 are perfectly fine typedefs, although they fit into
     category (d) better than here.
 
     NOTE! Again - there needs to be a _reason_ for this. If something
     is "unsigned long", then there's no reason to do
 
	typedef unsigned long myflags_t;
 
     but if there is a clear reason for why it under certain
     circumstances might be an "unsigned int" and under other
     configurations might be "unsigned long", then by all means go
     ahead and use a typedef.
 
 (c) when you use sparse to literally create a _new_ type for
     type-checking (see the sketch after this chapter).
 
 (d) New types which are identical to standard C99 types, in certain
     exceptional circumstances.
 
     Although it would only take a short amount of time for the eyes and
     brain to become accustomed to the standard types like "uint32_t",
     some people object to their use anyway.
 
     Therefore, the Linux-specific "u8/u16/u32/u64" types and their
     signed equivalents which are identical to standard types are
     permitted -- although they are not mandatory in new code of your
     own.
 
     When editing existing code which already uses one or the other set
     of types, you should conform to the existing choices in that code.
 
 (e) Types safe for use in userspace.
 
     In certain structures which are visible to userspace, we cannot
     require C99 types and cannot use the "u32" form above. Thus, we use
     __u32 and similar types in all structures which are shared with
     userspace.
 
 Maybe there are other cases too, but the rule should basically be to
 NEVER EVER use a typedef unless you can clearly match one of those
 rules.
 
 In general, a pointer, or a struct that has elements that can
 reasonably be directly accessed should _never_ be a typedef.
 
 
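As a hedged illustration of rule (c) above (the type name is invented; gfp_t in the real tree is declared along these lines):

	#include <linux/types.h>

	/* Under sparse, __bitwise makes this a distinct type: passing a
	 * plain unsigned int where a my_flags_t is expected gets flagged. */
	typedef unsigned int __bitwise my_flags_t;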
 		Chapter 6: Functions
 
 Functions should be short and sweet, and do just one thing. They should
 fit on one or two screenfuls of text (the ISO/ANSI screen size is
 80x24, as we all know), and do one thing and do that well.
 
 The maximum length of a function is inversely proportional to the
 complexity and indentation level of that function. So, if you have a
 conceptually simple function that is just one long (but simple)
 case-statement, where you have to do lots of small things for a lot of
 different cases, it's OK to have a longer function.
 
 However, if you have a complex function, and you suspect that a
 less-than-gifted first-year high-school student might not even
 understand what the function is all about, you should adhere to the
 maximum limits all the more closely. Use helper functions with
 descriptive names (you can ask the compiler to in-line them if you
 think it's performance-critical, and it will probably do a better job
 of it than you would have done).
 
 Another measure of the function is the number of local variables. They
 shouldn't exceed 5-10, or you're doing something wrong. Re-think the
 function, and split it into smaller pieces. A human brain can generally
 easily keep track of about 7 different things, anything more and it
 gets confused. You know you're brilliant, but maybe you'd like to
 understand what you did 2 weeks from now.
 
 In source files, separate functions with one blank line. If the
 function is exported, the EXPORT* macro for it should follow
 immediately after the closing function brace line. E.g.:
 
-int system_is_up(void)
-{
-	return system_state == SYSTEM_RUNNING;
-}
-EXPORT_SYMBOL(system_is_up);
+	int system_is_up(void)
+	{
+		return system_state == SYSTEM_RUNNING;
+	}
+	EXPORT_SYMBOL(system_is_up);
 
 In function prototypes, include parameter names with their data types.
 Although this is not required by the C language, it is preferred in
 Linux because it is a simple way to add valuable information for the
 reader.
 
 
 		Chapter 7: Centralized exiting of functions
 
 Albeit deprecated by some people, the equivalent of the goto statement
 is used frequently by compilers in form of the unconditional jump
 instruction.
 
-The goto statement comes in handy when a function exits from multiple
-locations and some common work such as cleanup has to be done.
+The goto statement comes in handy when a function exits from multiple
+locations and some common work such as cleanup has to be done. If there
+is no cleanup needed then just return directly.
 
 The rationale is:
@@ -354,26 +355,37 @@ EXPORT_SYMBOL(system_is_up);
 - errors caused by not updating an individual exit point when making
   modifications are avoided
 - saves the compiler work to optimize redundant code away ;)
 
-int fun(int a)
-{
-	int result = 0;
-	char *buffer = kmalloc(SIZE);
-
-	if (buffer == NULL)
-		return -ENOMEM;
-
-	if (condition1) {
-		while (loop1) {
-			...
-		}
-		result = 1;
-		goto out;
-	}
-	...
-out:
-	kfree(buffer);
-	return result;
-}
+	int fun(int a)
+	{
+		int result = 0;
+		char *buffer;
+
+		buffer = kmalloc(SIZE, GFP_KERNEL);
+		if (!buffer)
+			return -ENOMEM;
+
+		if (condition1) {
+			while (loop1) {
+				...
+			}
+			result = 1;
+			goto out_buffer;
+		}
+		...
+	out_buffer:
+		kfree(buffer);
+		return result;
+	}
+
+A common type of bug to be aware of is the "one err bugs" which look
+like this:
+
+	err:
+		kfree(foo->bar);
+		kfree(foo);
+		return ret;
+
+The bug in this code is that on some exit paths "foo" is NULL. Normally
+the fix for this is to split it up into two error labels "err_bar:" and
+"err_foo:".
 
 		Chapter 8: Commenting
@@ -386,10 +398,10 @@ out:
 to add too much. What you should do, instead, is put the comments at
 the head of the function, telling people what it does, and possibly WHY
 it does it.
 
 When commenting the kernel API functions, please use the kernel-doc
 format. See Documentation/kernel-doc-nano-HOWTO.txt and
 scripts/kernel-doc for details.
 
 Linux style for comments is the C89 "/* ... */" style. Don't use
 C99-style "// ..." comments.
 
 The preferred style for long (multi-line) comments is:
@@ -402,6 +414,15 @@ Linux style for comments is the C89 "/* ... */" style
 	 * with beginning and ending almost-blank lines.
 	 */
 
+For files in net/ and drivers/net/ the preferred style for long
+(multi-line) comments is a little different.
+
+	/* The preferred comment style for files in net/ and drivers/net
+	 * looks like this.
+	 *
+	 * It is nearly the same as the generally preferred comment style,
+	 * but there is no initial almost-blank line.
+	 */
+
 It's also important to comment data, whether they are basic types or
 derived types. To this end, use just one data declaration per line (no
 commas for multiple data declarations). This leaves you room for a
 small comment on each item, explaining its use.
@@ -409,49 +430,63 @@ Linux style for comments is the C89 "/* ... */" style
 
 		Chapter 9: You've made a mess of it
 
 That's OK, we all do. You've probably been told by your long-time Unix
 user helper that "GNU emacs" automatically formats the C sources for
 you, and you've noticed that yes, it does do that, but the defaults it
 uses are less than desirable (in fact, they are worse than random
 typing - an infinite number of monkeys typing into GNU emacs would
 never make a good program) (translator's note: see the Infinite Monkey
 Theorem).
 
 So, you can either get rid of GNU emacs, or change it to use saner
 values. To do the latter, you can stick the following in your .emacs
 file:
 
-(defun linux-c-mode ()
-  "C mode with adjusted defaults for use with the Linux kernel."
-  (interactive)
-  (c-mode)
-  (c-set-style "K&R")
-  (setq tab-width 8)
-  (setq indent-tabs-mode t)
-  (setq c-basic-offset 8))
-
-This will define the M-x linux-c-mode command. When hacking on a
-module, if you put the string -*- linux-c -*- somewhere on the first
-two lines, this mode will be automatically invoked. Also, you may want
-to add
-
-(setq auto-mode-alist (cons '("/usr/src/linux.*/.*\\.[ch]$" . linux-c-mode)
-			auto-mode-alist))
-
-to your .emacs file if you want to have linux-c-mode switched on
-automagically when you edit source files under /usr/src/linux.
+(defun c-lineup-arglist-tabs-only (ignored)
+  "Line up argument lists by tabs, not spaces"
+  (let* ((anchor (c-langelem-pos c-syntactic-element))
+         (column (c-langelem-2nd-pos c-syntactic-element))
+         (offset (- (1+ column) anchor))
+         (steps (floor offset c-basic-offset)))
+    (* (max steps 1)
+       c-basic-offset)))
+
+(add-hook 'c-mode-common-hook
+          (lambda ()
+            ;; Add kernel style
+            (c-add-style
+             "linux-tabs-only"
+             '("linux" (c-offsets-alist
+                        (arglist-cont-nonempty
+                         c-lineup-gcc-asm-reg
+                         c-lineup-arglist-tabs-only))))))
+
+(add-hook 'c-mode-hook
+          (lambda ()
+            (let ((filename (buffer-file-name)))
+              ;; Enable kernel mode for the appropriate files
+              (when (and filename
+                         (string-match (expand-file-name "~/src/linux-trees")
+                                       filename))
+                (setq indent-tabs-mode t)
+                (setq show-trailing-whitespace t)
+                (c-set-style "linux-tabs-only")))))
+
+This will make emacs go better with the kernel coding style for C files
+below ~/src/linux-trees.
 
 But even if you fail in getting emacs to do sane formatting, not
 everything is lost: use "indent".
 
 Now, again, GNU indent has the same brain-dead settings that GNU emacs
 has, which is why you need to give it a few command line options.
 However, that's not too bad, because even the makers of GNU indent
 recognize the authority of K&R (the GNU people aren't evil, they are
 just severely misguided in this matter), so you just give indent the
 options "-kr -i8" (stands for "K&R, 8 character indents"), or use
 "scripts/Lindent", which indents in the latest style.
 
 "indent" has a lot of options, and especially when it comes to comment
 re-formatting you may want to take a look at the man page. But
 remember: "indent" is not a fix for bad programming.
 
 
 		Chapter 10: Kconfig configuration files
 
 For all of the Kconfig* configuration files throughout the source tree,
 the indentation is somewhat different. Lines under a "config"
 definition are indented with one tab, while help text is indented an
 additional 2 spaces. Example:
 
 config AUDIT
 	bool "Auditing support"
@@ -470,7 +505,7 @@ config ADFS_FS_RW
 	depends on ADFS_FS
 	...
 
 For full documentation on the configuration files, see the file
 Documentation/kbuild/kconfig-language.txt.
 
 
 		Chapter 11: Data structures
@@ -489,11 +524,11 @@ config ADFS_FS_RW
 Many data structures do indeed have two levels of reference counting,
 when there are users of different "classes". The subclass count counts
 the number of subclass users, and decrements the global count just once
 when the subclass count goes to zero.
 
 Examples of this kind of "multi-level-reference-counting" can be found
 in memory management ("struct mm_struct": mm_users and mm_count), and
 in filesystem code ("struct super_block": s_count and s_active).
 
 Remember: if another thread can find your data structure, and you don't
 have a reference count on it, you almost certainly have a bug.
 
 
 		Chapter 12: Macros, Enums and RTL
@@ -508,102 +543,128 @@ config ADFS_FS_RW
 
 Generally, inline functions are preferable to macros resembling
 functions.
 
 Macros with multiple statements should be enclosed in a do-while block:
 
-#define macrofun(a, b, c) 			\
-	do {					\
-		if (a == 5)			\
-			do_this(b, c);		\
-	} while (0)
+	#define macrofun(a, b, c)			\
+		do {					\
+			if (a == 5)			\
+				do_this(b, c);		\
+		} while (0)
 
 Things to avoid when using macros:
 
 1) macros that affect control flow:
 
-#define FOO(x)					\
-	do {					\
-		if (blah(x) < 0)		\
-			return -EBUGGERED;	\
-	} while(0)
+	#define FOO(x)					\
+		do {					\
+			if (blah(x) < 0)		\
+				return -EBUGGERED;	\
+		} while (0)
 
 is a _very_ bad idea. It looks like a function call but exits the
 "calling" function; don't break the internal parsers of those who will
 read the code.
 
 2) macros that depend on having a local variable with a magic name:
 
-#define FOO(val) bar(index, val)
+	#define FOO(val) bar(index, val)
 
 might look like a good thing, but it's confusing as hell when one reads
 the code and it's prone to breakage from seemingly innocent changes.
 
 3) macros with arguments that are used as l-values: FOO(x) = y; will
 bite you if somebody e.g. turns FOO into an inline function.
 
 4) forgetting about precedence: macros defining constants using
 expressions must enclose the expression in parentheses. Beware of
 similar issues with macros using parameters.
 
-#define CONSTANT 0x4000
-#define CONSTEXP (CONSTANT | 3)
+	#define CONSTANT 0x4000
+	#define CONSTEXP (CONSTANT | 3)
+
+5) namespace collisions when defining local variables in macros
+resembling functions:
+
+	#define FOO(x)				\
+	({					\
+		typeof(x) ret;			\
+		ret = calc_ret(x);		\
+		(ret);				\
+	})
+
+ret is a common name for a local variable - __foo_ret is less likely to
+collide with an existing variable.
 
 The cpp manual deals with macros exhaustively. The gcc internals manual
 also covers RTL (translator's note: register transfer language), which
 is used frequently with assembly language in the kernel.
 
 
 		Chapter 13: Printing kernel messages
 
 Kernel developers like to be seen as literate. Do mind the spelling of
 kernel messages to make a good impression. Do not use crippled words
 like "dont"; use "do not" or "don't" instead. Make the messages
 concise, clear, and unambiguous.
 
 Kernel messages do not have to be terminated with a period.
 
 Printing numbers in parentheses (%d) adds no value and should be
 avoided.
 
-There are a number of driver model diagnostic macros in
-<linux/device.h> which you should use to make sure messages are matched
-to the right device and driver, and are tagged with the right level:
-dev_err(), dev_warn(), dev_info(), and so forth. For messages that
-aren't associated with a particular device, <linux/kernel.h> defines
-pr_debug() and pr_info().
+There are a number of driver model diagnostic macros in
+<linux/device.h> which you should use to make sure messages are matched
+to the right device and driver, and are tagged with the right level:
+dev_err(), dev_warn(), dev_info(), and so forth. For messages that
+aren't associated with a particular device, <linux/printk.h> defines
+pr_notice(), pr_info(), pr_warn(), pr_err(), and others.
 
-Coming up with good debugging messages can be quite a challenge; and
-once you have them, they can be a huge help for remote troubleshooting.
-Such messages should be compiled out when the DEBUG symbol is not
-defined (that is, by default they are not included). When you use
-dev_dbg() or pr_debug(), that's automatic. Many subsystems have Kconfig
-options to turn on -DDEBUG. A related convention uses VERBOSE_DEBUG to
-add dev_vdbg() messages to the ones already enabled by DEBUG.
+Coming up with good debugging messages can be quite a challenge; and
+once you have them, they can be a huge help for remote troubleshooting.
+However debug message printing is handled differently than printing
+other non-debug messages. While the other pr_XXX() functions print
+unconditionally, pr_debug() does not; it is compiled out by default,
+unless either DEBUG is defined or CONFIG_DYNAMIC_DEBUG is set. That is
+true for dev_dbg() also, and a related convention uses VERBOSE_DEBUG to
+add dev_vdbg() messages to the ones already enabled by DEBUG.
+
+Many subsystems have Kconfig debug options to turn on -DDEBUG in the
+corresponding Makefile; in other cases specific files #define DEBUG.
+And when a debug message should be printed unconditionally, e.g., if it
+is already inside a debug-related #ifdef section, printk(KERN_DEBUG
+...) can be used.
 
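As a hedged fragment illustrating the conventions above (pdev, the error value and the message texts are made up for the example):

	#include <linux/device.h>
	#include <linux/printk.h>

	/* Tied to a specific device: picks up the driver/device prefix. */
	dev_err(&pdev->dev, "firmware load failed: %d\n", err);

	/* Not tied to a device; compiled out unless DEBUG or
	 * CONFIG_DYNAMIC_DEBUG is enabled. */
	pr_debug("parsed %d descriptors\n", count);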
 
 		Chapter 14: Allocating memory
 
-The kernel provides the following general purpose memory allocators:
-kmalloc(), kzalloc(), kcalloc(), and vmalloc(). Please refer to the API
-documentation for further information about them.
+The kernel provides the following general purpose memory allocators:
+kmalloc(), kzalloc(), kmalloc_array(), kcalloc(), vmalloc(), and
+vzalloc(). Please refer to the API documentation for further
+information about them.
 
 The preferred form for passing a size of a struct is the following:
 
 	p = kmalloc(sizeof(*p), ...);
 
 The alternative form where struct name is spelled out hurts readability
 and introduces an opportunity for a bug when the pointer variable type
 is changed but the corresponding sizeof that is passed to a memory
 allocator is not.
 
 Casting the return value which is a void pointer is redundant. The
 conversion from void pointer to any other pointer type is guaranteed by
 the C programming language.
 
+The preferred form for allocating an array is the following:
+
+	p = kmalloc_array(n, sizeof(...), ...);
+
+The preferred form for allocating a zeroed array is the following:
+
+	p = kcalloc(n, sizeof(...), ...);
+
+Both forms check for overflow on the allocation size n * sizeof(...),
+and return NULL if that occurred.
+
 
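A minimal sketch of the preferred forms (struct sample and the count are invented for the example; error handling is elided):

	#include <linux/slab.h>

	struct sample *p, *table;
	size_t n = 16;

	/* sizeof(*p) survives a change of p's type; no cast of the
	 * void pointer is needed. */
	p = kmalloc(sizeof(*p), GFP_KERNEL);

	/* zeroed array; n * sizeof(*table) is overflow-checked. */
	table = kcalloc(n, sizeof(*table), GFP_KERNEL);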
 		Chapter 15: The inline disease
 
 There appears to be a common misperception that gcc has a magic "make
 me faster" speedup option called "inline". While the use of inlines can
 be appropriate (for example as a means of replacing macros, see Chapter
 12), it very often is not. Abundant use of the inline keyword leads to
 a much bigger kernel, which in turn slows the system as a whole down,
 due to a bigger icache footprint for the CPU (translator's note:
 level-1 caches are usually split between instructions and data) and
 simply because there is less memory available for the pagecache. Just
 think about it; a pagecache miss causes a disk seek, which easily takes
 5 milliseconds. There are a LOT of cpu cycles that can go into these 5
 milliseconds.
 
 A reasonable rule of thumb is to not put inline at functions that have
 more than 3 lines of code in them. An exception to this rule are the
 cases where a parameter is known to be a compile-time constant, and as
 a result of this constantness you know the compiler will be able to
 optimize most of your function away at compile time. For a good example
 of this latter case, see the kmalloc() inline function.
 
 Often people argue that adding inline to functions that are static and
 used only once is always a win since there is no space tradeoff. While
 this is technically correct, gcc is capable of inlining these
 automatically without help, and the maintenance issue of removing the
 inline when a second user appears outweighs the potential value of the
 hint that tells gcc to do something it would have done anyway.
 
 
@@ -613,37 +674,37 @@ vmalloc(). Please refer to the API documentation for further information.
 such a value can be represented as an error-code integer (-Exxx =
 failure, 0 = success) or a "succeeded" boolean (0 = failure, non-zero =
 success).
 
 Mixing up these two sorts of representations is a fertile source of
 difficult-to-find bugs. If the C language included a strict distinction
 between integers and booleans then the compiler would find these
 mistakes for us... but it doesn't. To help prevent such bugs, always
 follow this convention:
 
 	If the name of a function is an action or an imperative command,
 	the function should return an error-code integer. If the name is
 	a predicate, the function should return a "succeeded" boolean.
 
 For example, "add work" is a command, and the add_work() function
 returns 0 for success or -EBUSY for failure. In the same way, "PCI
 device present" is a predicate, and the pci_dev_present() function
 returns 1 if it succeeds in finding a matching device or 0 if it
 doesn't.
 
 All EXPORTed functions must respect this convention, and so should all
 public functions. Private (static) functions need not, but it is
 recommended that they do.
 
 Functions whose return value is the actual result of a computation,
 rather than an indication of whether the computation succeeded, are not
 subject to this rule. Generally they indicate failure by returning some
 out-of-range result. Typical examples would be functions that return
 pointers; they use NULL or the ERR_PTR mechanism to report failure.
 
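A small hedged sketch of the convention (the function bodies and helpers are invented; only the naming rule comes from the text above):

	/* "submit job" is a command: 0 on success, -E... on failure. */
	int submit_job(struct job *j)
	{
		if (queue_is_full())		/* hypothetical helper */
			return -EBUSY;
		enqueue_job(j);			/* hypothetical helper */
		return 0;
	}

	/* "job pending" is a predicate: a "succeeded" boolean. */
	bool job_pending(const struct job *j)
	{
		return j->state == JOB_QUEUED;	/* hypothetical field */
	}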
 
 		Chapter 17: Don't re-invent the kernel macros
 
 The header file include/linux/kernel.h contains a number of macros that
 you should use, rather than explicitly coding some variant of them
 yourself. For example, if you need to calculate the length of an array,
 take advantage of the macro
 
 	#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
 
 Similarly, if you need to calculate the size of some structure member,
 use
 
 	#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
 
 There are also min() and max() macros that do strict type checking if
 you need them. Feel free to peruse that header file to see what else is
 already defined that you shouldn't reproduce in your code.
 
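For instance, a hedged usage fragment (the array is invented for the example):

	#include <linux/kernel.h>

	static const int steps[] = { 1, 2, 4, 8 };
	int i, total = 0;

	for (i = 0; i < ARRAY_SIZE(steps); i++)	/* no hand-rolled sizeof math */
		total = max(total, steps[i]);	/* max() is strictly type-checked */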
@@ -653,42 +714,100 @@ NULL or the ERR_PTR mechanism to report failure.
 Some editors can interpret configuration information embedded in source
 files, indicated with special markers. For example, emacs interprets
 lines marked like this:
 
--*- mode: c -*-
+	-*- mode: c -*-
 
 Or like this:
 
-/*
-Local Variables:
-compile-command: "gcc -DMAGIC_DEBUG_FLAG foo.c"
-End:
-*/
+	/*
+	Local Variables:
+	compile-command: "gcc -DMAGIC_DEBUG_FLAG foo.c"
+	End:
+	*/
 
 Vim interprets markers that look like this:
 
-/* vim:set sw=8 noet */
+	/* vim:set sw=8 noet */
 
 Do not include any of these in source files. People have their own
 personal editor configurations, and your source files should not
 override them. This includes markers for indentation and mode
 configuration. People may use their own custom mode, or may have some
 other magic method for making indentation work correctly.
 
 
+		Chapter 19: Inline assembly
+
+In architecture-specific code, you may need to use inline assembly to
+interface with CPU or platform functionality. Don't hesitate to do so
+when necessary. However, don't use inline assembly gratuitously when C
+can do the job. You can and should poke hardware from C when possible.
+
+Consider writing simple helper functions that wrap common bits of
+inline assembly, rather than repeatedly writing them with slight
+variations. Remember that inline assembly can use C parameters.
+
+Large, non-trivial assembly functions should go in .S files, with
+corresponding C prototypes defined in C header files. The C prototypes
+for assembly functions should use "asmlinkage".
+
+You may need to mark your asm statement as volatile, to prevent GCC
+from removing it if GCC doesn't notice any side effects. You don't
+always need to do so, though, and doing so unnecessarily can limit
+optimization.
+
+When writing a single inline assembly statement containing multiple
+instructions, put each instruction on a separate line in a separate
+quoted string, and end each string except the last with \n\t to
+properly indent the next instruction in the assembly output:
+
+	asm ("magic %reg1, #42\n\t"
+	     "more_magic %reg2, %reg3"
+	     : /* outputs */ : /* inputs */ : /* clobbers */);
+
+
+		Chapter 20: Conditional Compilation
+
+Wherever possible, don't use preprocessor conditionals in .c files;
+doing so makes code harder to read and logic harder to follow. Instead,
+use such conditionals in a header file defining functions for use in
+those .c files, providing no-op stub versions (translator's note: a
+stub is a piece of code standing in for some other functionality) in
+the #else case, and then call those functions unconditionally from .c
+files. The compiler will avoid generating any code for the stub calls,
+producing identical results, but the logic will remain easy to follow.
+
+Compile out entire functions, rather than portions of functions or
+portions of expressions. Rather than putting an ifdef in an expression,
+factor out part or all of the expression into a separate helper
+function and apply the conditional to that function.
+
+If you have a function or variable which may potentially go unused in a
+particular configuration, and the compiler would warn about its
+definition going unused, mark the definition as __maybe_unused rather
+than wrapping it in a preprocessor conditional. (However, if a function
+or variable always goes unused, delete it.)
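A hedged one-liner illustrating that advice (the function, struct and config symbol are invented):

	#include <linux/compiler.h>

	/* Only called when CONFIG_FOO=y; one definition, no #ifdef. */
	static void __maybe_unused foo_debug_dump(struct foo *f)
	{
		pr_debug("state=%d\n", f->state);
	}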
+
+Within code, where possible, use the IS_ENABLED macro to convert a
+Kconfig symbol into a C boolean expression, and use it in a normal C
+conditional:
+
+	if (IS_ENABLED(CONFIG_SOMETHING)) {
+		...
+	}
+
+The compiler will constant-fold the conditional away, and include or
+exclude the block of code just as with an #ifdef, so this adds no
+runtime overhead. However, this approach still allows the C compiler to
+see the code inside the block, and check it for correctness (syntax,
+types, symbol references, etc). Thus, you still have to use an #ifdef
+if the code inside the block references symbols that will not exist if
+the condition is not met.
+
+At the end of any non-trivial #if or #ifdef block (more than a few
+lines), place a comment after the #endif on the same line, noting the
+conditional expression used. For instance:
+
+	#ifdef CONFIG_SOMETHING
+	...
+	#endif /* CONFIG_SOMETHING */
+
 
 		Appendix I: References
 
-The C Programming Language, Second Edition, by Brian W. Kernighan and
-Dennis M. Ritchie. Prentice Hall, Inc., 1988. ISBN 0-13-110362-8
-(paperback), 0-13-110370-9 (hardback). URL:
-http://cm.bell-labs.com/cm/cs/cbook/
+The C Programming Language, Second Edition
+by Brian W. Kernighan and Dennis M. Ritchie.
+Prentice Hall, Inc., 1988.
+ISBN 0-13-110362-8 (paperback), 0-13-110370-9 (hardback).
 
-The Practice of Programming, by Brian W. Kernighan and Rob Pike.
-Addison-Wesley, Inc., 1999. ISBN 0-201-61586-X. URL:
-http://cm.bell-labs.com/cm/cs/tpop/
+The Practice of Programming
+by Brian W. Kernighan and Rob Pike.
+Addison-Wesley, Inc., 1999.
+ISBN 0-201-61586-X.
 
-GNU manuals for cpp, gcc, gcc internals and indent - where in line with
-K&R and this text - all available from http://www.gnu.org/manual/
+GNU manuals - where in line with K&R and this text - for cpp, gcc, gcc
+internals and indent, all available from http://www.gnu.org/manual/
 
 WG14 is the international standardization working group for the
 programming language C, URL: http://www.open-std.org/JTC1/SC22/WG14/
 
 Kernel CodingStyle, by greg@kroah.com at OLS 2002:
 http://www.kroah.com/linux/talks/ols_2002_kernel_codingstyle_talk/html/
-
---
-Last updated on July 13, 2007.
diff --git a/MAINTAINERS b/MAINTAINERS
index 7304d2e37a98..0c22fe584283 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -865,9 +865,17 @@ F: Documentation/devicetree/bindings/display/snps,arcpgu.txt
865ARM HDLCD DRM DRIVER 865ARM HDLCD DRM DRIVER
866M: Liviu Dudau <liviu.dudau@arm.com> 866M: Liviu Dudau <liviu.dudau@arm.com>
867S: Supported 867S: Supported
868F: drivers/gpu/drm/arm/ 868F: drivers/gpu/drm/arm/hdlcd_*
869F: Documentation/devicetree/bindings/display/arm,hdlcd.txt 869F: Documentation/devicetree/bindings/display/arm,hdlcd.txt
870 870
871ARM MALI-DP DRM DRIVER
872M: Liviu Dudau <liviu.dudau@arm.com>
873M: Brian Starkey <brian.starkey@arm.com>
874M: Mali DP Maintainers <malidp@foss.arm.com>
875S: Supported
876F: drivers/gpu/drm/arm/
877F: Documentation/devicetree/bindings/display/arm,malidp.txt
878
871ARM MFM AND FLOPPY DRIVERS 879ARM MFM AND FLOPPY DRIVERS
872M: Ian Molton <spyro@f2s.com> 880M: Ian Molton <spyro@f2s.com>
873S: Maintained 881S: Maintained
@@ -1159,6 +1167,7 @@ F: arch/arm/mach-footbridge/
1159ARM/FREESCALE IMX / MXC ARM ARCHITECTURE 1167ARM/FREESCALE IMX / MXC ARM ARCHITECTURE
1160M: Shawn Guo <shawnguo@kernel.org> 1168M: Shawn Guo <shawnguo@kernel.org>
1161M: Sascha Hauer <kernel@pengutronix.de> 1169M: Sascha Hauer <kernel@pengutronix.de>
1170R: Fabio Estevam <fabio.estevam@nxp.com>
1162L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1171L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1163S: Maintained 1172S: Maintained
1164T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git 1173T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
@@ -2242,7 +2251,8 @@ F: include/net/ax25.h
2242F: net/ax25/ 2251F: net/ax25/
2243 2252
2244AZ6007 DVB DRIVER 2253AZ6007 DVB DRIVER
2245M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> 2254M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
2255M: Mauro Carvalho Chehab <mchehab@kernel.org>
2246L: linux-media@vger.kernel.org 2256L: linux-media@vger.kernel.org
2247W: https://linuxtv.org 2257W: https://linuxtv.org
2248T: git git://linuxtv.org/media_tree.git 2258T: git git://linuxtv.org/media_tree.git
@@ -2709,7 +2719,8 @@ F: Documentation/filesystems/btrfs.txt
2709F: fs/btrfs/ 2719F: fs/btrfs/
2710 2720
2711BTTV VIDEO4LINUX DRIVER 2721BTTV VIDEO4LINUX DRIVER
2712M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> 2722M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
2723M: Mauro Carvalho Chehab <mchehab@kernel.org>
2713L: linux-media@vger.kernel.org 2724L: linux-media@vger.kernel.org
2714W: https://linuxtv.org 2725W: https://linuxtv.org
2715T: git git://linuxtv.org/media_tree.git 2726T: git git://linuxtv.org/media_tree.git
@@ -2773,9 +2784,9 @@ F: include/net/caif/
2773F: net/caif/ 2784F: net/caif/
2774 2785
2775CALGARY x86-64 IOMMU 2786CALGARY x86-64 IOMMU
2776M: Muli Ben-Yehuda <muli@il.ibm.com> 2787M: Muli Ben-Yehuda <mulix@mulix.org>
2777M: "Jon D. Mason" <jdmason@kudzu.us> 2788M: Jon Mason <jdmason@kudzu.us>
2778L: discuss@x86-64.org 2789L: iommu@lists.linux-foundation.org
2779S: Maintained 2790S: Maintained
2780F: arch/x86/kernel/pci-calgary_64.c 2791F: arch/x86/kernel/pci-calgary_64.c
2781F: arch/x86/kernel/tce_64.c 2792F: arch/x86/kernel/tce_64.c
@@ -3086,6 +3097,7 @@ M: Stephen Boyd <sboyd@codeaurora.org>
3086L: linux-clk@vger.kernel.org 3097L: linux-clk@vger.kernel.org
3087T: git git://git.kernel.org/pub/scm/linux/kernel/git/clk/linux.git 3098T: git git://git.kernel.org/pub/scm/linux/kernel/git/clk/linux.git
3088S: Maintained 3099S: Maintained
3100F: Documentation/devicetree/bindings/clock/
3089F: drivers/clk/ 3101F: drivers/clk/
3090X: drivers/clk/clkdev.c 3102X: drivers/clk/clkdev.c
3091F: include/linux/clk-pr* 3103F: include/linux/clk-pr*
@@ -3343,7 +3355,8 @@ S: Maintained
3343F: drivers/media/dvb-frontends/cx24120* 3355F: drivers/media/dvb-frontends/cx24120*
3344 3356
3345CX88 VIDEO4LINUX DRIVER 3357CX88 VIDEO4LINUX DRIVER
3346M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> 3358M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
3359M: Mauro Carvalho Chehab <mchehab@kernel.org>
3347L: linux-media@vger.kernel.org 3360L: linux-media@vger.kernel.org
3348W: https://linuxtv.org 3361W: https://linuxtv.org
3349T: git git://linuxtv.org/media_tree.git 3362T: git git://linuxtv.org/media_tree.git
@@ -3773,6 +3786,7 @@ Q: https://patchwork.kernel.org/project/linux-dmaengine/list/
3773S: Maintained 3786S: Maintained
3774F: drivers/dma/ 3787F: drivers/dma/
3775F: include/linux/dmaengine.h 3788F: include/linux/dmaengine.h
3789F: Documentation/devicetree/bindings/dma/
3776F: Documentation/dmaengine/ 3790F: Documentation/dmaengine/
3777T: git git://git.infradead.org/users/vkoul/slave-dma.git 3791T: git git://git.infradead.org/users/vkoul/slave-dma.git
3778 3792
@@ -3854,7 +3868,10 @@ T: git git://people.freedesktop.org/~airlied/linux
3854S: Maintained 3868S: Maintained
3855F: drivers/gpu/drm/ 3869F: drivers/gpu/drm/
3856F: drivers/gpu/vga/ 3870F: drivers/gpu/vga/
3857F: Documentation/DocBook/gpu.* 3871F: Documentation/devicetree/bindings/display/
3872F: Documentation/devicetree/bindings/gpu/
3873F: Documentation/devicetree/bindings/video/
3874F: Documentation/gpu/
3858F: include/drm/ 3875F: include/drm/
3859F: include/uapi/drm/ 3876F: include/uapi/drm/
3860 3877
@@ -3906,6 +3923,7 @@ S: Supported
3906F: drivers/gpu/drm/i915/ 3923F: drivers/gpu/drm/i915/
3907F: include/drm/i915* 3924F: include/drm/i915*
3908F: include/uapi/drm/i915_drm.h 3925F: include/uapi/drm/i915_drm.h
3926F: Documentation/gpu/i915.rst
3909 3927
3910DRM DRIVERS FOR ATMEL HLCDC 3928DRM DRIVERS FOR ATMEL HLCDC
3911M: Boris Brezillon <boris.brezillon@free-electrons.com> 3929M: Boris Brezillon <boris.brezillon@free-electrons.com>
@@ -4101,6 +4119,21 @@ F: drivers/gpu/drm/vc4/
4101F: include/uapi/drm/vc4_drm.h 4119F: include/uapi/drm/vc4_drm.h
4102F: Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt 4120F: Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt
4103 4121
4122DRM DRIVERS FOR TI OMAP
4123M: Tomi Valkeinen <tomi.valkeinen@ti.com>
4124L: dri-devel@lists.freedesktop.org
4125S: Maintained
4126F: drivers/gpu/drm/omapdrm/
4127F: Documentation/devicetree/bindings/display/ti/
4128
4129DRM DRIVERS FOR TI LCDC
4130M: Jyri Sarha <jsarha@ti.com>
4131R: Tomi Valkeinen <tomi.valkeinen@ti.com>
4132L: dri-devel@lists.freedesktop.org
4133S: Maintained
4134F: drivers/gpu/drm/tilcdc/
4135F: Documentation/devicetree/bindings/display/tilcdc/
4136
4104DSBR100 USB FM RADIO DRIVER 4137DSBR100 USB FM RADIO DRIVER
4105M: Alexey Klimov <klimov.linux@gmail.com> 4138M: Alexey Klimov <klimov.linux@gmail.com>
4106L: linux-media@vger.kernel.org 4139L: linux-media@vger.kernel.org
@@ -4290,7 +4323,8 @@ F: fs/ecryptfs/
4290EDAC-CORE 4323EDAC-CORE
4291M: Doug Thompson <dougthompson@xmission.com> 4324M: Doug Thompson <dougthompson@xmission.com>
4292M: Borislav Petkov <bp@alien8.de> 4325M: Borislav Petkov <bp@alien8.de>
4293M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> 4326M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
4327M: Mauro Carvalho Chehab <mchehab@kernel.org>
4294L: linux-edac@vger.kernel.org 4328L: linux-edac@vger.kernel.org
4295T: git git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp.git for-next 4329T: git git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp.git for-next
4296T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-edac.git linux_next 4330T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-edac.git linux_next
@@ -4335,7 +4369,8 @@ S: Maintained
4335F: drivers/edac/e7xxx_edac.c 4369F: drivers/edac/e7xxx_edac.c
4336 4370
4337EDAC-GHES 4371EDAC-GHES
4338M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> 4372M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
4373M: Mauro Carvalho Chehab <mchehab@kernel.org>
4339L: linux-edac@vger.kernel.org 4374L: linux-edac@vger.kernel.org
4340S: Maintained 4375S: Maintained
4341F: drivers/edac/ghes_edac.c 4376F: drivers/edac/ghes_edac.c
@@ -4359,19 +4394,22 @@ S: Maintained
4359F: drivers/edac/i5000_edac.c 4394F: drivers/edac/i5000_edac.c
4360 4395
4361EDAC-I5400 4396EDAC-I5400
4362M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> 4397M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
4398M: Mauro Carvalho Chehab <mchehab@kernel.org>
4363L: linux-edac@vger.kernel.org 4399L: linux-edac@vger.kernel.org
4364S: Maintained 4400S: Maintained
4365F: drivers/edac/i5400_edac.c 4401F: drivers/edac/i5400_edac.c
4366 4402
4367EDAC-I7300 4403EDAC-I7300
4368M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> 4404M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
4405M: Mauro Carvalho Chehab <mchehab@kernel.org>
4369L: linux-edac@vger.kernel.org 4406L: linux-edac@vger.kernel.org
4370S: Maintained 4407S: Maintained
4371F: drivers/edac/i7300_edac.c 4408F: drivers/edac/i7300_edac.c
4372 4409
4373EDAC-I7CORE 4410EDAC-I7CORE
4374M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> 4411M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
4412M: Mauro Carvalho Chehab <mchehab@kernel.org>
4375L: linux-edac@vger.kernel.org 4413L: linux-edac@vger.kernel.org
4376S: Maintained 4414S: Maintained
4377F: drivers/edac/i7core_edac.c 4415F: drivers/edac/i7core_edac.c
@@ -4408,7 +4446,8 @@ S: Maintained
4408F: drivers/edac/r82600_edac.c 4446F: drivers/edac/r82600_edac.c
4409 4447
4410EDAC-SBRIDGE 4448EDAC-SBRIDGE
4411M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> 4449M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
4450M: Mauro Carvalho Chehab <mchehab@kernel.org>
4412L: linux-edac@vger.kernel.org 4451L: linux-edac@vger.kernel.org
4413S: Maintained 4452S: Maintained
4414F: drivers/edac/sb_edac.c 4453F: drivers/edac/sb_edac.c
@@ -4467,7 +4506,8 @@ S: Maintained
4467F: drivers/net/ethernet/ibm/ehea/ 4506F: drivers/net/ethernet/ibm/ehea/
4468 4507
4469EM28XX VIDEO4LINUX DRIVER 4508EM28XX VIDEO4LINUX DRIVER
4470M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> 4509M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
4510M: Mauro Carvalho Chehab <mchehab@kernel.org>
4471L: linux-media@vger.kernel.org 4511L: linux-media@vger.kernel.org
4472W: https://linuxtv.org 4512W: https://linuxtv.org
4473T: git git://linuxtv.org/media_tree.git 4513T: git git://linuxtv.org/media_tree.git
@@ -6486,6 +6526,7 @@ F: include/uapi/linux/sunrpc/
6486 6526
6487KERNEL SELFTEST FRAMEWORK 6527KERNEL SELFTEST FRAMEWORK
6488M: Shuah Khan <shuahkh@osg.samsung.com> 6528M: Shuah Khan <shuahkh@osg.samsung.com>
6529M: Shuah Khan <shuah@kernel.org>
6489L: linux-kselftest@vger.kernel.org 6530L: linux-kselftest@vger.kernel.org
6490T: git git://git.kernel.org/pub/scm/shuah/linux-kselftest 6531T: git git://git.kernel.org/pub/scm/shuah/linux-kselftest
6491S: Maintained 6532S: Maintained
@@ -7357,7 +7398,8 @@ S: Supported
7357F: drivers/media/pci/netup_unidvb/* 7398F: drivers/media/pci/netup_unidvb/*
7358 7399
7359MEDIA INPUT INFRASTRUCTURE (V4L/DVB) 7400MEDIA INPUT INFRASTRUCTURE (V4L/DVB)
7360M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> 7401M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
7402M: Mauro Carvalho Chehab <mchehab@kernel.org>
7361P: LinuxTV.org Project 7403P: LinuxTV.org Project
7362L: linux-media@vger.kernel.org 7404L: linux-media@vger.kernel.org
7363W: https://linuxtv.org 7405W: https://linuxtv.org
@@ -7989,6 +8031,7 @@ Q: http://patchwork.ozlabs.org/project/netdev/list/
7989T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git 8031T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
7990T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git 8032T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
7991S: Odd Fixes 8033S: Odd Fixes
8034F: Documentation/devicetree/bindings/net/
7992F: drivers/net/ 8035F: drivers/net/
7993F: include/linux/if_* 8036F: include/linux/if_*
7994F: include/linux/netdevice.h 8037F: include/linux/netdevice.h
@@ -8007,6 +8050,7 @@ Q: http://patchwork.kernel.org/project/linux-wireless/list/
8007T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers.git 8050T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers.git
8008T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next.git 8051T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next.git
8009S: Maintained 8052S: Maintained
8053F: Documentation/devicetree/bindings/net/wireless/
8010F: drivers/net/wireless/ 8054F: drivers/net/wireless/
8011 8055
8012NETXEN (1/10) GbE SUPPORT 8056NETXEN (1/10) GbE SUPPORT
@@ -8404,10 +8448,9 @@ F: drivers/i2c/busses/i2c-ocores.c
8404OPEN FIRMWARE AND FLATTENED DEVICE TREE 8448OPEN FIRMWARE AND FLATTENED DEVICE TREE
8405M: Rob Herring <robh+dt@kernel.org> 8449M: Rob Herring <robh+dt@kernel.org>
8406M: Frank Rowand <frowand.list@gmail.com> 8450M: Frank Rowand <frowand.list@gmail.com>
8407M: Grant Likely <grant.likely@linaro.org>
8408L: devicetree@vger.kernel.org 8451L: devicetree@vger.kernel.org
8409W: http://www.devicetree.org/ 8452W: http://www.devicetree.org/
8410T: git git://git.kernel.org/pub/scm/linux/kernel/git/glikely/linux.git 8453T: git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git
8411S: Maintained 8454S: Maintained
8412F: drivers/of/ 8455F: drivers/of/
8413F: include/linux/of*.h 8456F: include/linux/of*.h
@@ -8415,12 +8458,10 @@ F: scripts/dtc/
8415 8458
8416OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS 8459OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
8417M: Rob Herring <robh+dt@kernel.org> 8460M: Rob Herring <robh+dt@kernel.org>
8418M: Pawel Moll <pawel.moll@arm.com>
8419M: Mark Rutland <mark.rutland@arm.com> 8461M: Mark Rutland <mark.rutland@arm.com>
8420M: Ian Campbell <ijc+devicetree@hellion.org.uk>
8421M: Kumar Gala <galak@codeaurora.org>
8422L: devicetree@vger.kernel.org 8462L: devicetree@vger.kernel.org
8423T: git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git 8463T: git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git
8464Q: http://patchwork.ozlabs.org/project/devicetree-bindings/list/
8424S: Maintained 8465S: Maintained
8425F: Documentation/devicetree/ 8466F: Documentation/devicetree/
8426F: arch/*/boot/dts/ 8467F: arch/*/boot/dts/
@@ -8944,6 +8985,7 @@ M: Linus Walleij <linus.walleij@linaro.org>
8944L: linux-gpio@vger.kernel.org 8985L: linux-gpio@vger.kernel.org
8945T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git 8986T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git
8946S: Maintained 8987S: Maintained
8988F: Documentation/devicetree/bindings/pinctrl/
8947F: drivers/pinctrl/ 8989F: drivers/pinctrl/
8948F: include/linux/pinctrl/ 8990F: include/linux/pinctrl/
8949 8991
@@ -9851,7 +9893,8 @@ S: Odd Fixes
9851F: drivers/media/i2c/saa6588* 9893F: drivers/media/i2c/saa6588*
9852 9894
9853SAA7134 VIDEO4LINUX DRIVER 9895SAA7134 VIDEO4LINUX DRIVER
9854M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> 9896M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
9897M: Mauro Carvalho Chehab <mchehab@kernel.org>
9855L: linux-media@vger.kernel.org 9898L: linux-media@vger.kernel.org
9856W: https://linuxtv.org 9899W: https://linuxtv.org
9857T: git git://linuxtv.org/media_tree.git 9900T: git git://linuxtv.org/media_tree.git
@@ -10370,7 +10413,8 @@ S: Maintained
10370F: drivers/media/radio/si4713/radio-usb-si4713.c 10413F: drivers/media/radio/si4713/radio-usb-si4713.c
10371 10414
10372SIANO DVB DRIVER 10415SIANO DVB DRIVER
10373M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> 10416M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
10417M: Mauro Carvalho Chehab <mchehab@kernel.org>
10374L: linux-media@vger.kernel.org 10418L: linux-media@vger.kernel.org
10375W: https://linuxtv.org 10419W: https://linuxtv.org
10376T: git git://linuxtv.org/media_tree.git 10420T: git git://linuxtv.org/media_tree.git
@@ -11136,7 +11180,8 @@ S: Maintained
11136F: drivers/media/i2c/tda9840* 11180F: drivers/media/i2c/tda9840*
11137 11181
11138TEA5761 TUNER DRIVER 11182TEA5761 TUNER DRIVER
11139M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> 11183M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
11184M: Mauro Carvalho Chehab <mchehab@kernel.org>
11140L: linux-media@vger.kernel.org 11185L: linux-media@vger.kernel.org
11141W: https://linuxtv.org 11186W: https://linuxtv.org
11142T: git git://linuxtv.org/media_tree.git 11187T: git git://linuxtv.org/media_tree.git
@@ -11144,7 +11189,8 @@ S: Odd fixes
11144F: drivers/media/tuners/tea5761.* 11189F: drivers/media/tuners/tea5761.*
11145 11190
11146TEA5767 TUNER DRIVER 11191TEA5767 TUNER DRIVER
11147M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> 11192M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
11193M: Mauro Carvalho Chehab <mchehab@kernel.org>
11148L: linux-media@vger.kernel.org 11194L: linux-media@vger.kernel.org
11149W: https://linuxtv.org 11195W: https://linuxtv.org
11150T: git git://linuxtv.org/media_tree.git 11196T: git git://linuxtv.org/media_tree.git
@@ -11531,7 +11577,8 @@ F: include/linux/shmem_fs.h
11531F: mm/shmem.c 11577F: mm/shmem.c
11532 11578
11533TM6000 VIDEO4LINUX DRIVER 11579TM6000 VIDEO4LINUX DRIVER
11534M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> 11580M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
11581M: Mauro Carvalho Chehab <mchehab@kernel.org>
11535L: linux-media@vger.kernel.org 11582L: linux-media@vger.kernel.org
11536W: https://linuxtv.org 11583W: https://linuxtv.org
11537T: git git://linuxtv.org/media_tree.git 11584T: git git://linuxtv.org/media_tree.git
@@ -11885,7 +11932,8 @@ F: drivers/usb/common/usb-otg-fsm.c
11885 11932
11886USB OVER IP DRIVER 11933USB OVER IP DRIVER
11887M: Valentina Manea <valentina.manea.m@gmail.com> 11934M: Valentina Manea <valentina.manea.m@gmail.com>
11888M: Shuah Khan <shuah.kh@samsung.com> 11935M: Shuah Khan <shuahkh@osg.samsung.com>
11936M: Shuah Khan <shuah@kernel.org>
11889L: linux-usb@vger.kernel.org 11937L: linux-usb@vger.kernel.org
11890S: Maintained 11938S: Maintained
11891F: Documentation/usb/usbip_protocol.txt 11939F: Documentation/usb/usbip_protocol.txt
@@ -11956,6 +12004,7 @@ L: linux-usb@vger.kernel.org
11956W: http://www.linux-usb.org 12004W: http://www.linux-usb.org
11957T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb.git 12005T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb.git
11958S: Supported 12006S: Supported
12007F: Documentation/devicetree/bindings/usb/
11959F: Documentation/usb/ 12008F: Documentation/usb/
11960F: drivers/usb/ 12009F: drivers/usb/
11961F: include/linux/usb.h 12010F: include/linux/usb.h
@@ -12129,6 +12178,7 @@ VIRTIO CORE, NET AND BLOCK DRIVERS
12129M: "Michael S. Tsirkin" <mst@redhat.com> 12178M: "Michael S. Tsirkin" <mst@redhat.com>
12130L: virtualization@lists.linux-foundation.org 12179L: virtualization@lists.linux-foundation.org
12131S: Maintained 12180S: Maintained
12181F: Documentation/devicetree/bindings/virtio/
12132F: drivers/virtio/ 12182F: drivers/virtio/
12133F: tools/virtio/ 12183F: tools/virtio/
12134F: drivers/net/virtio_net.c 12184F: drivers/net/virtio_net.c
@@ -12517,7 +12567,8 @@ S: Maintained
12517F: arch/x86/entry/vdso/ 12567F: arch/x86/entry/vdso/
12518 12568
12519XC2028/3028 TUNER DRIVER 12569XC2028/3028 TUNER DRIVER
12520M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> 12570M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
12571M: Mauro Carvalho Chehab <mchehab@kernel.org>
12521L: linux-media@vger.kernel.org 12572L: linux-media@vger.kernel.org
12522W: https://linuxtv.org 12573W: https://linuxtv.org
12523T: git git://linuxtv.org/media_tree.git 12574T: git git://linuxtv.org/media_tree.git
diff --git a/Makefile b/Makefile
index 0f70de63cfdb..4acefc10c13a 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 4 1VERSION = 4
2PATCHLEVEL = 7 2PATCHLEVEL = 7
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc1 4EXTRAVERSION = -rc5
5NAME = Psychotic Stoned Sheep 5NAME = Psychotic Stoned Sheep
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
@@ -1412,8 +1412,11 @@ $(help-board-dirs): help-%:
1412 1412
1413# Documentation targets 1413# Documentation targets
1414# --------------------------------------------------------------------------- 1414# ---------------------------------------------------------------------------
1415%docs: scripts_basic FORCE 1415DOC_TARGETS := xmldocs sgmldocs psdocs pdfdocs htmldocs mandocs installmandocs epubdocs cleandocs
1416PHONY += $(DOC_TARGETS)
1417$(DOC_TARGETS): scripts_basic FORCE
1416 $(Q)$(MAKE) $(build)=scripts build_docproc build_check-lc_ctype 1418 $(Q)$(MAKE) $(build)=scripts build_docproc build_check-lc_ctype
1419 $(Q)$(MAKE) $(build)=Documentation -f $(srctree)/Documentation/Makefile.sphinx $@
1417 $(Q)$(MAKE) $(build)=Documentation/DocBook $@ 1420 $(Q)$(MAKE) $(build)=Documentation/DocBook $@
1418 1421
1419else # KBUILD_EXTMOD 1422else # KBUILD_EXTMOD
diff --git a/arch/Kconfig b/arch/Kconfig
index d794384a0404..15996290fed4 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -226,8 +226,8 @@ config ARCH_INIT_TASK
226config ARCH_TASK_STRUCT_ALLOCATOR 226config ARCH_TASK_STRUCT_ALLOCATOR
227 bool 227 bool
228 228
229# Select if arch has its private alloc_thread_info() function 229# Select if arch has its private alloc_thread_stack() function
230config ARCH_THREAD_INFO_ALLOCATOR 230config ARCH_THREAD_STACK_ALLOCATOR
231 bool 231 bool
232 232
233# Select if arch wants to size task_struct dynamically via arch_task_struct_size: 233# Select if arch wants to size task_struct dynamically via arch_task_struct_size:
@@ -606,6 +606,9 @@ config HAVE_ARCH_HASH
606 file which provides platform-specific implementations of some 606 file which provides platform-specific implementations of some
607 functions in <linux/hash.h> or fs/namei.c. 607 functions in <linux/hash.h> or fs/namei.c.
608 608
609config ISA_BUS_API
610 def_bool ISA
611
609# 612#
610# ABI hall of shame 613# ABI hall of shame
611# 614#
diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
index aab14a019c20..c2ebb6f36c9d 100644
--- a/arch/alpha/include/asm/pgalloc.h
+++ b/arch/alpha/include/asm/pgalloc.h
@@ -40,7 +40,7 @@ pgd_free(struct mm_struct *mm, pgd_t *pgd)
40static inline pmd_t * 40static inline pmd_t *
41pmd_alloc_one(struct mm_struct *mm, unsigned long address) 41pmd_alloc_one(struct mm_struct *mm, unsigned long address)
42{ 42{
43 pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); 43 pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
44 return ret; 44 return ret;
45} 45}
46 46
@@ -53,7 +53,7 @@ pmd_free(struct mm_struct *mm, pmd_t *pmd)
53static inline pte_t * 53static inline pte_t *
54pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) 54pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
55{ 55{
56 pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); 56 pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
57 return pte; 57 return pte;
58} 58}
59 59
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 0dcbacfdea4b..0d3e59f56974 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -61,7 +61,7 @@ config RWSEM_GENERIC_SPINLOCK
61 def_bool y 61 def_bool y
62 62
63config ARCH_DISCONTIGMEM_ENABLE 63config ARCH_DISCONTIGMEM_ENABLE
64 def_bool y 64 def_bool n
65 65
66config ARCH_FLATMEM_ENABLE 66config ARCH_FLATMEM_ENABLE
67 def_bool y 67 def_bool y
@@ -186,9 +186,6 @@ if SMP
186config ARC_HAS_COH_CACHES 186config ARC_HAS_COH_CACHES
187 def_bool n 187 def_bool n
188 188
189config ARC_HAS_REENTRANT_IRQ_LV2
190 def_bool n
191
192config ARC_MCIP 189config ARC_MCIP
193 bool "ARConnect Multicore IP (MCIP) Support " 190 bool "ARConnect Multicore IP (MCIP) Support "
194 depends on ISA_ARCV2 191 depends on ISA_ARCV2
@@ -366,25 +363,10 @@ config NODES_SHIFT
366if ISA_ARCOMPACT 363if ISA_ARCOMPACT
367 364
368config ARC_COMPACT_IRQ_LEVELS 365config ARC_COMPACT_IRQ_LEVELS
369 bool "ARCompact IRQ Priorities: High(2)/Low(1)" 366 bool "Setup Timer IRQ as high Priority"
370 default n 367 default n
371 # Timer HAS to be high priority, for any other high priority config
372 select ARC_IRQ3_LV2
373 # if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy 368 # if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy
374 depends on !SMP || ARC_HAS_REENTRANT_IRQ_LV2 369 depends on !SMP
375
376if ARC_COMPACT_IRQ_LEVELS
377
378config ARC_IRQ3_LV2
379 bool
380
381config ARC_IRQ5_LV2
382 bool
383
384config ARC_IRQ6_LV2
385 bool
386
387endif #ARC_COMPACT_IRQ_LEVELS
388 370
389config ARC_FPU_SAVE_RESTORE 371config ARC_FPU_SAVE_RESTORE
390 bool "Enable FPU state persistence across context switch" 372 bool "Enable FPU state persistence across context switch"
@@ -407,11 +389,6 @@ config ARC_HAS_LLSC
407 default y 389 default y
408 depends on !ARC_CANT_LLSC 390 depends on !ARC_CANT_LLSC
409 391
410config ARC_STAR_9000923308
411 bool "Workaround for llock/scond livelock"
412 default n
413 depends on ISA_ARCV2 && SMP && ARC_HAS_LLSC
414
415config ARC_HAS_SWAPE 392config ARC_HAS_SWAPE
416 bool "Insn: SWAPE (endian-swap)" 393 bool "Insn: SWAPE (endian-swap)"
417 default y 394 default y
@@ -471,7 +448,7 @@ config LINUX_LINK_BASE
471 448
472config HIGHMEM 449config HIGHMEM
473 bool "High Memory Support" 450 bool "High Memory Support"
474 select DISCONTIGMEM 451 select ARCH_DISCONTIGMEM_ENABLE
475 help 452 help
476 With ARC 2G:2G address split, only upper 2G is directly addressable by 453 With ARC 2G:2G address split, only upper 2G is directly addressable by
477 kernel. Enable this to potentially allow access to rest of 2G and PAE 454 kernel. Enable this to potentially allow access to rest of 2G and PAE
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 02fabef2891c..d4df6be66d58 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -127,7 +127,7 @@ libs-y += arch/arc/lib/ $(LIBGCC)
127 127
128boot := arch/arc/boot 128boot := arch/arc/boot
129 129
130#default target for make without any arguements. 130#default target for make without any arguments.
131KBUILD_IMAGE := bootpImage 131KBUILD_IMAGE := bootpImage
132 132
133all: $(KBUILD_IMAGE) 133all: $(KBUILD_IMAGE)
diff --git a/arch/arc/boot/dts/abilis_tb100.dtsi b/arch/arc/boot/dts/abilis_tb100.dtsi
index 3942634f805a..02410b211433 100644
--- a/arch/arc/boot/dts/abilis_tb100.dtsi
+++ b/arch/arc/boot/dts/abilis_tb100.dtsi
@@ -23,8 +23,6 @@
23 23
24 24
25/ { 25/ {
26 clock-frequency = <500000000>; /* 500 MHZ */
27
28 soc100 { 26 soc100 {
29 bus-frequency = <166666666>; 27 bus-frequency = <166666666>;
30 28
diff --git a/arch/arc/boot/dts/abilis_tb101.dtsi b/arch/arc/boot/dts/abilis_tb101.dtsi
index b0467229a5c4..f9e7686044eb 100644
--- a/arch/arc/boot/dts/abilis_tb101.dtsi
+++ b/arch/arc/boot/dts/abilis_tb101.dtsi
@@ -23,8 +23,6 @@
23 23
24 24
25/ { 25/ {
26 clock-frequency = <500000000>; /* 500 MHZ */
27
28 soc100 { 26 soc100 {
29 bus-frequency = <166666666>; 27 bus-frequency = <166666666>;
30 28
diff --git a/arch/arc/boot/dts/axc001.dtsi b/arch/arc/boot/dts/axc001.dtsi
index 3e02f152edcb..6ae2c476ad82 100644
--- a/arch/arc/boot/dts/axc001.dtsi
+++ b/arch/arc/boot/dts/axc001.dtsi
@@ -15,7 +15,6 @@
15 15
16/ { 16/ {
17 compatible = "snps,arc"; 17 compatible = "snps,arc";
18 clock-frequency = <750000000>; /* 750 MHZ */
19 #address-cells = <1>; 18 #address-cells = <1>;
20 #size-cells = <1>; 19 #size-cells = <1>;
21 20
diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi
index 378e455a94c4..14df46f141bf 100644
--- a/arch/arc/boot/dts/axc003.dtsi
+++ b/arch/arc/boot/dts/axc003.dtsi
@@ -14,7 +14,6 @@
14 14
15/ { 15/ {
16 compatible = "snps,arc"; 16 compatible = "snps,arc";
17 clock-frequency = <90000000>;
18 #address-cells = <1>; 17 #address-cells = <1>;
19 #size-cells = <1>; 18 #size-cells = <1>;
20 19
diff --git a/arch/arc/boot/dts/axc003_idu.dtsi b/arch/arc/boot/dts/axc003_idu.dtsi
index 64c94b2860ab..3d6cfa32bf51 100644
--- a/arch/arc/boot/dts/axc003_idu.dtsi
+++ b/arch/arc/boot/dts/axc003_idu.dtsi
@@ -14,7 +14,6 @@
14 14
15/ { 15/ {
16 compatible = "snps,arc"; 16 compatible = "snps,arc";
17 clock-frequency = <90000000>;
18 #address-cells = <1>; 17 #address-cells = <1>;
19 #size-cells = <1>; 18 #size-cells = <1>;
20 19
diff --git a/arch/arc/boot/dts/eznps.dts b/arch/arc/boot/dts/eznps.dts
index b89f6c3eb352..1e0d225791c1 100644
--- a/arch/arc/boot/dts/eznps.dts
+++ b/arch/arc/boot/dts/eznps.dts
@@ -18,7 +18,6 @@
18 18
19/ { 19/ {
20 compatible = "ezchip,arc-nps"; 20 compatible = "ezchip,arc-nps";
21 clock-frequency = <83333333>; /* 83.333333 MHZ */
22 #address-cells = <1>; 21 #address-cells = <1>;
23 #size-cells = <1>; 22 #size-cells = <1>;
24 interrupt-parent = <&intc>; 23 interrupt-parent = <&intc>;
diff --git a/arch/arc/boot/dts/nsim_700.dts b/arch/arc/boot/dts/nsim_700.dts
index 5d5e373e0ebc..63970513e4ae 100644
--- a/arch/arc/boot/dts/nsim_700.dts
+++ b/arch/arc/boot/dts/nsim_700.dts
@@ -11,7 +11,6 @@
11 11
12/ { 12/ {
13 compatible = "snps,nsim"; 13 compatible = "snps,nsim";
14 clock-frequency = <80000000>; /* 80 MHZ */
15 #address-cells = <1>; 14 #address-cells = <1>;
16 #size-cells = <1>; 15 #size-cells = <1>;
17 interrupt-parent = <&core_intc>; 16 interrupt-parent = <&core_intc>;
diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts
index b5b060adce8a..e659a340ca8a 100644
--- a/arch/arc/boot/dts/nsimosci.dts
+++ b/arch/arc/boot/dts/nsimosci.dts
@@ -11,7 +11,6 @@
11 11
12/ { 12/ {
13 compatible = "snps,nsimosci"; 13 compatible = "snps,nsimosci";
14 clock-frequency = <20000000>; /* 20 MHZ */
15 #address-cells = <1>; 14 #address-cells = <1>;
16 #size-cells = <1>; 15 #size-cells = <1>;
17 interrupt-parent = <&core_intc>; 16 interrupt-parent = <&core_intc>;
@@ -20,7 +19,7 @@
20 /* this is for console on PGU */ 19 /* this is for console on PGU */
21 /* bootargs = "console=tty0 consoleblank=0"; */ 20 /* bootargs = "console=tty0 consoleblank=0"; */
22 /* this is for console on serial */ 21 /* this is for console on serial */
23 bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug"; 22 bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug video=640x480-24";
24 }; 23 };
25 24
26 aliases { 25 aliases {
@@ -58,9 +57,17 @@
58 no-loopback-test = <1>; 57 no-loopback-test = <1>;
59 }; 58 };
60 59
61 pgu0: pgu@f9000000 { 60 pguclk: pguclk {
62 compatible = "snps,arcpgufb"; 61 #clock-cells = <0>;
62 compatible = "fixed-clock";
63 clock-frequency = <25175000>;
64 };
65
66 pgu@f9000000 {
67 compatible = "snps,arcpgu";
63 reg = <0xf9000000 0x400>; 68 reg = <0xf9000000 0x400>;
69 clocks = <&pguclk>;
70 clock-names = "pxlclk";
64 }; 71 };
65 72
66 ps2: ps2@f9001000 { 73 ps2: ps2@f9001000 {
diff --git a/arch/arc/boot/dts/nsimosci_hs.dts b/arch/arc/boot/dts/nsimosci_hs.dts
index 325e73090a18..16ce5d65cfde 100644
--- a/arch/arc/boot/dts/nsimosci_hs.dts
+++ b/arch/arc/boot/dts/nsimosci_hs.dts
@@ -11,7 +11,6 @@
11 11
12/ { 12/ {
13 compatible = "snps,nsimosci_hs"; 13 compatible = "snps,nsimosci_hs";
14 clock-frequency = <20000000>; /* 20 MHZ */
15 #address-cells = <1>; 14 #address-cells = <1>;
16 #size-cells = <1>; 15 #size-cells = <1>;
17 interrupt-parent = <&core_intc>; 16 interrupt-parent = <&core_intc>;
@@ -20,7 +19,7 @@
20 /* this is for console on PGU */ 19 /* this is for console on PGU */
21 /* bootargs = "console=tty0 consoleblank=0"; */ 20 /* bootargs = "console=tty0 consoleblank=0"; */
22 /* this is for console on serial */ 21 /* this is for console on serial */
23 bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug"; 22 bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug video=640x480-24";
24 }; 23 };
25 24
26 aliases { 25 aliases {
@@ -58,9 +57,17 @@
58 no-loopback-test = <1>; 57 no-loopback-test = <1>;
59 }; 58 };
60 59
61 pgu0: pgu@f9000000 { 60 pguclk: pguclk {
62 compatible = "snps,arcpgufb"; 61 #clock-cells = <0>;
62 compatible = "fixed-clock";
63 clock-frequency = <25175000>;
64 };
65
66 pgu@f9000000 {
67 compatible = "snps,arcpgu";
63 reg = <0xf9000000 0x400>; 68 reg = <0xf9000000 0x400>;
69 clocks = <&pguclk>;
70 clock-names = "pxlclk";
64 }; 71 };
65 72
66 ps2: ps2@f9001000 { 73 ps2: ps2@f9001000 {
diff --git a/arch/arc/boot/dts/nsimosci_hs_idu.dts b/arch/arc/boot/dts/nsimosci_hs_idu.dts
index ee03d7126581..ce8dfbc30c4d 100644
--- a/arch/arc/boot/dts/nsimosci_hs_idu.dts
+++ b/arch/arc/boot/dts/nsimosci_hs_idu.dts
@@ -11,14 +11,13 @@
11 11
12/ { 12/ {
13 compatible = "snps,nsimosci_hs"; 13 compatible = "snps,nsimosci_hs";
14 clock-frequency = <5000000>; /* 5 MHZ */
15 #address-cells = <1>; 14 #address-cells = <1>;
16 #size-cells = <1>; 15 #size-cells = <1>;
17 interrupt-parent = <&core_intc>; 16 interrupt-parent = <&core_intc>;
18 17
19 chosen { 18 chosen {
20 /* this is for console on serial */ 19 /* this is for console on serial */
21 bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblan=0 debug"; 20 bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblan=0 debug video=640x480-24";
22 }; 21 };
23 22
24 aliases { 23 aliases {
@@ -77,9 +76,17 @@
77 no-loopback-test = <1>; 76 no-loopback-test = <1>;
78 }; 77 };
79 78
80 pgu0: pgu@f9000000 { 79 pguclk: pguclk {
81 compatible = "snps,arcpgufb"; 80 #clock-cells = <0>;
81 compatible = "fixed-clock";
82 clock-frequency = <25175000>;
83 };
84
85 pgu@f9000000 {
86 compatible = "snps,arcpgu";
82 reg = <0xf9000000 0x400>; 87 reg = <0xf9000000 0x400>;
88 clocks = <&pguclk>;
89 clock-names = "pxlclk";
83 }; 90 };
84 91
85 ps2: ps2@f9001000 { 92 ps2: ps2@f9001000 {
diff --git a/arch/arc/boot/dts/skeleton.dtsi b/arch/arc/boot/dts/skeleton.dtsi
index 3a10cc633e2b..65808fe0a290 100644
--- a/arch/arc/boot/dts/skeleton.dtsi
+++ b/arch/arc/boot/dts/skeleton.dtsi
@@ -13,7 +13,6 @@
13 13
14/ { 14/ {
15 compatible = "snps,arc"; 15 compatible = "snps,arc";
16 clock-frequency = <80000000>; /* 80 MHZ */
17 #address-cells = <1>; 16 #address-cells = <1>;
18 #size-cells = <1>; 17 #size-cells = <1>;
19 chosen { }; 18 chosen { };
diff --git a/arch/arc/boot/dts/skeleton_hs.dtsi b/arch/arc/boot/dts/skeleton_hs.dtsi
index 71fd308a9298..2dfe8037dfbb 100644
--- a/arch/arc/boot/dts/skeleton_hs.dtsi
+++ b/arch/arc/boot/dts/skeleton_hs.dtsi
@@ -8,7 +8,6 @@
8 8
9/ { 9/ {
10 compatible = "snps,arc"; 10 compatible = "snps,arc";
11 clock-frequency = <80000000>; /* 80 MHZ */
12 #address-cells = <1>; 11 #address-cells = <1>;
13 #size-cells = <1>; 12 #size-cells = <1>;
14 chosen { }; 13 chosen { };
diff --git a/arch/arc/boot/dts/skeleton_hs_idu.dtsi b/arch/arc/boot/dts/skeleton_hs_idu.dtsi
index d1cb25a66989..4c11079f3565 100644
--- a/arch/arc/boot/dts/skeleton_hs_idu.dtsi
+++ b/arch/arc/boot/dts/skeleton_hs_idu.dtsi
@@ -8,7 +8,6 @@
8 8
9/ { 9/ {
10 compatible = "snps,arc"; 10 compatible = "snps,arc";
11 clock-frequency = <80000000>; /* 80 MHZ */
12 #address-cells = <1>; 11 #address-cells = <1>;
13 #size-cells = <1>; 12 #size-cells = <1>;
14 chosen { }; 13 chosen { };
diff --git a/arch/arc/boot/dts/vdk_axc003.dtsi b/arch/arc/boot/dts/vdk_axc003.dtsi
index ad4ee43bd2ac..0fd6ba985b16 100644
--- a/arch/arc/boot/dts/vdk_axc003.dtsi
+++ b/arch/arc/boot/dts/vdk_axc003.dtsi
@@ -14,7 +14,6 @@
14 14
15/ { 15/ {
16 compatible = "snps,arc"; 16 compatible = "snps,arc";
17 clock-frequency = <50000000>;
18 #address-cells = <1>; 17 #address-cells = <1>;
19 #size-cells = <1>; 18 #size-cells = <1>;
20 19
diff --git a/arch/arc/boot/dts/vdk_axc003_idu.dtsi b/arch/arc/boot/dts/vdk_axc003_idu.dtsi
index a3cb6263c581..82214cd7ba0c 100644
--- a/arch/arc/boot/dts/vdk_axc003_idu.dtsi
+++ b/arch/arc/boot/dts/vdk_axc003_idu.dtsi
@@ -15,7 +15,6 @@
15 15
16/ { 16/ {
17 compatible = "snps,arc"; 17 compatible = "snps,arc";
18 clock-frequency = <50000000>;
19 #address-cells = <1>; 18 #address-cells = <1>;
20 #size-cells = <1>; 19 #size-cells = <1>;
21 20
diff --git a/arch/arc/boot/dts/vdk_axs10x_mb.dtsi b/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
index 45cd665fca23..99498a4b4216 100644
--- a/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
@@ -23,6 +23,11 @@
23 #clock-cells = <0>; 23 #clock-cells = <0>;
24 }; 24 };
25 25
26 pguclk: pguclk {
27 #clock-cells = <0>;
28 compatible = "fixed-clock";
29 clock-frequency = <25175000>;
30 };
26 }; 31 };
27 32
28 ethernet@0x18000 { 33 ethernet@0x18000 {
@@ -75,11 +80,11 @@
75 }; 80 };
76 81
77/* PGU output directly sent to virtual LCD screen; hdmi controller not modelled */ 82/* PGU output directly sent to virtual LCD screen; hdmi controller not modelled */
78 pgu@0x17000 { 83 pgu@17000 {
79 compatible = "snps,arcpgufb"; 84 compatible = "snps,arcpgu";
80 reg = <0x17000 0x400>; 85 reg = <0x17000 0x400>;
81 clock-frequency = <51000000>; /* PGU'clock is initated in init function */ 86 clocks = <&pguclk>;
82 /* interrupts = <5>; PGU interrupts not used, this vector is used for ps2 below */ 87 clock-names = "pxlclk";
83 }; 88 };
84 89
85/* VDK has additional ps2 keyboard/mouse interface integrated in LCD screen model */ 90/* VDK has additional ps2 keyboard/mouse interface integrated in LCD screen model */
diff --git a/arch/arc/boot/dts/vdk_hs38_smp.dts b/arch/arc/boot/dts/vdk_hs38_smp.dts
index 031a5bc79b3e..2ba60c399d99 100644
--- a/arch/arc/boot/dts/vdk_hs38_smp.dts
+++ b/arch/arc/boot/dts/vdk_hs38_smp.dts
@@ -16,6 +16,6 @@
16 compatible = "snps,axs103"; 16 compatible = "snps,axs103";
17 17
18 chosen { 18 chosen {
19 bootargs = "earlycon=uart8250,mmio32,0xe0022000,115200n8 console=tty0 console=ttyS3,115200n8 consoleblank=0"; 19 bootargs = "earlycon=uart8250,mmio32,0xe0022000,115200n8 console=tty0 console=ttyS3,115200n8 consoleblank=0 video=640x480-24";
20 }; 20 };
21}; 21};
diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
index 42bafa552498..98cf20933bbb 100644
--- a/arch/arc/configs/nsimosci_defconfig
+++ b/arch/arc/configs/nsimosci_defconfig
@@ -58,7 +58,8 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=1
58CONFIG_SERIAL_OF_PLATFORM=y 58CONFIG_SERIAL_OF_PLATFORM=y
59# CONFIG_HW_RANDOM is not set 59# CONFIG_HW_RANDOM is not set
60# CONFIG_HWMON is not set 60# CONFIG_HWMON is not set
61CONFIG_FB=y 61CONFIG_DRM=y
62CONFIG_DRM_ARCPGU=y
62CONFIG_FRAMEBUFFER_CONSOLE=y 63CONFIG_FRAMEBUFFER_CONSOLE=y
63CONFIG_LOGO=y 64CONFIG_LOGO=y
64# CONFIG_HID is not set 65# CONFIG_HID is not set
diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig
index 4bb60c1cd4a2..ddf8b96d494e 100644
--- a/arch/arc/configs/nsimosci_hs_defconfig
+++ b/arch/arc/configs/nsimosci_hs_defconfig
@@ -57,7 +57,8 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=1
57CONFIG_SERIAL_OF_PLATFORM=y 57CONFIG_SERIAL_OF_PLATFORM=y
58# CONFIG_HW_RANDOM is not set 58# CONFIG_HW_RANDOM is not set
59# CONFIG_HWMON is not set 59# CONFIG_HWMON is not set
60CONFIG_FB=y 60CONFIG_DRM=y
61CONFIG_DRM_ARCPGU=y
61CONFIG_FRAMEBUFFER_CONSOLE=y 62CONFIG_FRAMEBUFFER_CONSOLE=y
62CONFIG_LOGO=y 63CONFIG_LOGO=y
63# CONFIG_HID is not set 64# CONFIG_HID is not set
diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig
index 7e88f4c720f8..ceb90745326e 100644
--- a/arch/arc/configs/nsimosci_hs_smp_defconfig
+++ b/arch/arc/configs/nsimosci_hs_smp_defconfig
@@ -70,7 +70,8 @@ CONFIG_SERIAL_8250_DW=y
70CONFIG_SERIAL_OF_PLATFORM=y 70CONFIG_SERIAL_OF_PLATFORM=y
71# CONFIG_HW_RANDOM is not set 71# CONFIG_HW_RANDOM is not set
72# CONFIG_HWMON is not set 72# CONFIG_HWMON is not set
73CONFIG_FB=y 73CONFIG_DRM=y
74CONFIG_DRM_ARCPGU=y
74CONFIG_FRAMEBUFFER_CONSOLE=y 75CONFIG_FRAMEBUFFER_CONSOLE=y
75CONFIG_LOGO=y 76CONFIG_LOGO=y
76# CONFIG_HID is not set 77# CONFIG_HID is not set
diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig
index 52ec315dc5c9..969b206d6c67 100644
--- a/arch/arc/configs/vdk_hs38_smp_defconfig
+++ b/arch/arc/configs/vdk_hs38_smp_defconfig
@@ -63,12 +63,9 @@ CONFIG_SERIAL_8250_DW=y
63CONFIG_SERIAL_OF_PLATFORM=y 63CONFIG_SERIAL_OF_PLATFORM=y
64# CONFIG_HW_RANDOM is not set 64# CONFIG_HW_RANDOM is not set
65# CONFIG_HWMON is not set 65# CONFIG_HWMON is not set
66CONFIG_FB=y 66CONFIG_DRM=y
67CONFIG_ARCPGU_RGB888=y 67CONFIG_DRM_ARCPGU=y
68CONFIG_ARCPGU_DISPTYPE=0
69# CONFIG_VGA_CONSOLE is not set
70CONFIG_FRAMEBUFFER_CONSOLE=y 68CONFIG_FRAMEBUFFER_CONSOLE=y
71CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
72CONFIG_LOGO=y 69CONFIG_LOGO=y
73# CONFIG_LOGO_LINUX_MONO is not set 70# CONFIG_LOGO_LINUX_MONO is not set
74# CONFIG_LOGO_LINUX_VGA16 is not set 71# CONFIG_LOGO_LINUX_VGA16 is not set
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 5f3dcbbc0cc9..dd683995bc9d 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -25,50 +25,17 @@
25 25
26#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i)) 26#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
27 27
28#ifdef CONFIG_ARC_STAR_9000923308
29
30#define SCOND_FAIL_RETRY_VAR_DEF \
31 unsigned int delay = 1, tmp; \
32
33#define SCOND_FAIL_RETRY_ASM \
34 " bz 4f \n" \
35 " ; --- scond fail delay --- \n" \
36 " mov %[tmp], %[delay] \n" /* tmp = delay */ \
37 "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \
38 " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \
39 " rol %[delay], %[delay] \n" /* delay *= 2 */ \
40 " b 1b \n" /* start over */ \
41 "4: ; --- success --- \n" \
42
43#define SCOND_FAIL_RETRY_VARS \
44 ,[delay] "+&r" (delay),[tmp] "=&r" (tmp) \
45
46#else /* !CONFIG_ARC_STAR_9000923308 */
47
48#define SCOND_FAIL_RETRY_VAR_DEF
49
50#define SCOND_FAIL_RETRY_ASM \
51 " bnz 1b \n" \
52
53#define SCOND_FAIL_RETRY_VARS
54
55#endif
56
57#define ATOMIC_OP(op, c_op, asm_op) \ 28#define ATOMIC_OP(op, c_op, asm_op) \
58static inline void atomic_##op(int i, atomic_t *v) \ 29static inline void atomic_##op(int i, atomic_t *v) \
59{ \ 30{ \
60 unsigned int val; \ 31 unsigned int val; \
61 SCOND_FAIL_RETRY_VAR_DEF \
62 \ 32 \
63 __asm__ __volatile__( \ 33 __asm__ __volatile__( \
64 "1: llock %[val], [%[ctr]] \n" \ 34 "1: llock %[val], [%[ctr]] \n" \
65 " " #asm_op " %[val], %[val], %[i] \n" \ 35 " " #asm_op " %[val], %[val], %[i] \n" \
66 " scond %[val], [%[ctr]] \n" \ 36 " scond %[val], [%[ctr]] \n" \
67 " \n" \ 37 " bnz 1b \n" \
68 SCOND_FAIL_RETRY_ASM \
69 \
70 : [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \ 38 : [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \
71 SCOND_FAIL_RETRY_VARS \
72 : [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \ 39 : [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \
73 [i] "ir" (i) \ 40 [i] "ir" (i) \
74 : "cc"); \ 41 : "cc"); \
@@ -77,8 +44,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
77#define ATOMIC_OP_RETURN(op, c_op, asm_op) \ 44#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
78static inline int atomic_##op##_return(int i, atomic_t *v) \ 45static inline int atomic_##op##_return(int i, atomic_t *v) \
79{ \ 46{ \
80 unsigned int val; \ 47 unsigned int val; \
81 SCOND_FAIL_RETRY_VAR_DEF \
82 \ 48 \
83 /* \ 49 /* \
84 * Explicit full memory barrier needed before/after as \ 50 * Explicit full memory barrier needed before/after as \
@@ -90,11 +56,8 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
90 "1: llock %[val], [%[ctr]] \n" \ 56 "1: llock %[val], [%[ctr]] \n" \
91 " " #asm_op " %[val], %[val], %[i] \n" \ 57 " " #asm_op " %[val], %[val], %[i] \n" \
92 " scond %[val], [%[ctr]] \n" \ 58 " scond %[val], [%[ctr]] \n" \
93 " \n" \ 59 " bnz 1b \n" \
94 SCOND_FAIL_RETRY_ASM \
95 \
96 : [val] "=&r" (val) \ 60 : [val] "=&r" (val) \
97 SCOND_FAIL_RETRY_VARS \
98 : [ctr] "r" (&v->counter), \ 61 : [ctr] "r" (&v->counter), \
99 [i] "ir" (i) \ 62 [i] "ir" (i) \
100 : "cc"); \ 63 : "cc"); \
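The llock/scond pair above is a load-locked/store-conditional retry loop: scond fails if another CPU touched the counter between the load and the store, and "bnz 1b" simply retries. A minimal portable sketch of the same shape in C11 atomics (atomic_add_sketch is a made-up name, not a kernel API):

#include <stdatomic.h>

/* Re-read the counter and attempt a conditional store until no
 * other CPU intervenes; the compare-exchange failure plays the
 * role of scond failing and branching back to label 1b. */
static void atomic_add_sketch(atomic_int *v, int i)
{
        int old = atomic_load_explicit(v, memory_order_relaxed);

        while (!atomic_compare_exchange_weak_explicit(v, &old, old + i,
                                                      memory_order_relaxed,
                                                      memory_order_relaxed))
                ;       /* 'old' is reloaded on failure; just retry */
}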
diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h
index e0e1faf03c50..14c310f2e0b1 100644
--- a/arch/arc/include/asm/entry-compact.h
+++ b/arch/arc/include/asm/entry-compact.h
@@ -76,8 +76,8 @@
76 * We need to be a bit more cautious here. What if a kernel bug in 76 * We need to be a bit more cautious here. What if a kernel bug in
77 * L1 ISR, caused SP to go whaco (some small value which looks like 77 * L1 ISR, caused SP to go whaco (some small value which looks like
78 * USER stk) and then we take L2 ISR. 78 * USER stk) and then we take L2 ISR.
79 * Above brlo alone would treat it as a valid L1-L2 sceanrio 79 * Above brlo alone would treat it as a valid L1-L2 scenario
80 * instead of shouting alound 80 * instead of shouting around
81 * The only feasible way is to make sure this L2 happened in 81 * The only feasible way is to make sure this L2 happened in
82 * L1 prologue ONLY i.e. ilink2 is less than a pre-set marker in 82 * L1 prologue ONLY i.e. ilink2 is less than a pre-set marker in
83 * L1 ISR before it switches stack 83 * L1 ISR before it switches stack
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
index 1fd467ef658f..b0b87f2447f5 100644
--- a/arch/arc/include/asm/mmu_context.h
+++ b/arch/arc/include/asm/mmu_context.h
@@ -83,7 +83,7 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
83 local_flush_tlb_all(); 83 local_flush_tlb_all();
84 84
85 /* 85 /*
86 * Above checke for rollover of 8 bit ASID in 32 bit container. 86 * Above check for rollover of 8 bit ASID in 32 bit container.
87 * If the container itself wrapped around, set it to a non zero 87 * If the container itself wrapped around, set it to a non zero
88 * "generation" to distinguish from no context 88 * "generation" to distinguish from no context
89 */ 89 */
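The check the comment refers to allocates ASIDs from a single 32-bit counter: the low 8 bits are the hardware ASID and the upper bits act as a generation tag, so a stale mm can never alias a freshly allocated ASID. A sketch of that scheme (names and constants are illustrative, not the kernel's):

#define ASID_MASK       0xffU
#define FIRST_GEN       0x100U  /* non-zero, so "no context" stays distinct */

static unsigned int asid_cache = FIRST_GEN;     /* per-CPU in real code */

static unsigned int alloc_asid_sketch(unsigned int *mm_ctx)
{
        if (!(++asid_cache & ASID_MASK)) {      /* 8-bit ASID rolled over */
                /* the real code flushes the TLB here */
                if (!asid_cache)                /* 32-bit container wrapped */
                        asid_cache = FIRST_GEN;
        }
        *mm_ctx = asid_cache;                   /* full value tags the mm */
        return asid_cache & ASID_MASK;          /* low bits go to hardware */
}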
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
index 86ed671286df..3749234b7419 100644
--- a/arch/arc/include/asm/pgalloc.h
+++ b/arch/arc/include/asm/pgalloc.h
@@ -95,7 +95,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
95{ 95{
96 pte_t *pte; 96 pte_t *pte;
97 97
98 pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 98 pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
99 __get_order_pte()); 99 __get_order_pte());
100 100
101 return pte; 101 return pte;
@@ -107,7 +107,7 @@ pte_alloc_one(struct mm_struct *mm, unsigned long address)
107 pgtable_t pte_pg; 107 pgtable_t pte_pg;
108 struct page *page; 108 struct page *page;
109 109
110 pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte()); 110 pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL, __get_order_pte());
111 if (!pte_pg) 111 if (!pte_pg)
112 return 0; 112 return 0;
113 memzero((void *)pte_pg, PTRS_PER_PTE * sizeof(pte_t)); 113 memzero((void *)pte_pg, PTRS_PER_PTE * sizeof(pte_t));
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 034bbdc0ff61..858f98ef7f1b 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -47,7 +47,7 @@
47 * Page Tables are purely for Linux VM's consumption and the bits below are 47 * Page Tables are purely for Linux VM's consumption and the bits below are
48 * suited to that (uniqueness). Hence some are not implemented in the TLB and 48 * suited to that (uniqueness). Hence some are not implemented in the TLB and
49 * some have different value in TLB. 49 * some have different value in TLB.
50 * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible becoz they live in 50 * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live in
51 * separate PD0 and PD1, which combined form a translation entry) 51 * separate PD0 and PD1, which combined form a translation entry)
52 * while for PTE perspective, they are 8 and 9 respectively 52 * while for PTE perspective, they are 8 and 9 respectively
53 * with MMU v3: Most bits (except SHARED) represent the exact hardware pos 53 * with MMU v3: Most bits (except SHARED) represent the exact hardware pos
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index f9048994b22f..16b630fbeb6a 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -78,7 +78,7 @@ struct task_struct;
78#define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp) 78#define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp)
79 79
80/* 80/*
81 * Where abouts of Task's sp, fp, blink when it was last seen in kernel mode. 81 * Whereabouts of Task's sp, fp, blink when it was last seen in kernel mode.
82 * Look in process.c for details of kernel stack layout 82 * Look in process.c for details of kernel stack layout
83 */ 83 */
84#define TSK_K_ESP(tsk) (tsk->thread.ksp) 84#define TSK_K_ESP(tsk) (tsk->thread.ksp)
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
index 991380438d6b..89fdd1b0a76e 100644
--- a/arch/arc/include/asm/smp.h
+++ b/arch/arc/include/asm/smp.h
@@ -86,7 +86,7 @@ static inline const char *arc_platform_smp_cpuinfo(void)
86 * (1) These insn were introduced only in the 4.10 release. So for older releases 86 * (1) These insn were introduced only in the 4.10 release. So for older releases
87 * support is needed. 87 * support is needed.
88 * 88 *
89 * (2) In a SMP setup, the LLOCK/SCOND atomiticity across CPUs needs to be 89 * (2) In a SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
90 * guaranteed by the platform (not something which core handles). 90 * guaranteed by the platform (not something which core handles).
91 * Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ 91 * Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
92 * disabling for atomicity. 92 * disabling for atomicity.
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index 800e7c430ca5..cded4a9b5438 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -20,11 +20,6 @@
20 20
21#ifdef CONFIG_ARC_HAS_LLSC 21#ifdef CONFIG_ARC_HAS_LLSC
22 22
23/*
24 * A normal LLOCK/SCOND based system, w/o need for livelock workaround
25 */
26#ifndef CONFIG_ARC_STAR_9000923308
27
28static inline void arch_spin_lock(arch_spinlock_t *lock) 23static inline void arch_spin_lock(arch_spinlock_t *lock)
29{ 24{
30 unsigned int val; 25 unsigned int val;
@@ -238,293 +233,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
238 smp_mb(); 233 smp_mb();
239} 234}
240 235
241#else /* CONFIG_ARC_STAR_9000923308 */
242
243/*
244 * HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping
245 * coherency transactions in the SCU. The exclusive line state keeps rotating
246 * among contenting cores leading to a never ending cycle. So break the cycle
247 * by deferring the retry of failed exclusive access (SCOND). The actual delay
248 * needed is function of number of contending cores as well as the unrelated
249 * coherency traffic from other cores. To keep the code simple, start off with
250 * small delay of 1 which would suffice most cases and in case of contention
251 * double the delay. Eventually the delay is sufficient such that the coherency
252 * pipeline is drained, thus a subsequent exclusive access would succeed.
253 */
254
255#define SCOND_FAIL_RETRY_VAR_DEF \
256 unsigned int delay, tmp; \
257
258#define SCOND_FAIL_RETRY_ASM \
259 " ; --- scond fail delay --- \n" \
260 " mov %[tmp], %[delay] \n" /* tmp = delay */ \
261 "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \
262 " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \
263 " rol %[delay], %[delay] \n" /* delay *= 2 */ \
264 " b 1b \n" /* start over */ \
265 " \n" \
266 "4: ; --- done --- \n" \
267
268#define SCOND_FAIL_RETRY_VARS \
269 ,[delay] "=&r" (delay), [tmp] "=&r" (tmp) \
270
271static inline void arch_spin_lock(arch_spinlock_t *lock)
272{
273 unsigned int val;
274 SCOND_FAIL_RETRY_VAR_DEF;
275
276 smp_mb();
277
278 __asm__ __volatile__(
279 "0: mov %[delay], 1 \n"
280 "1: llock %[val], [%[slock]] \n"
281 " breq %[val], %[LOCKED], 0b \n" /* spin while LOCKED */
282 " scond %[LOCKED], [%[slock]] \n" /* acquire */
283 " bz 4f \n" /* done */
284 " \n"
285 SCOND_FAIL_RETRY_ASM
286
287 : [val] "=&r" (val)
288 SCOND_FAIL_RETRY_VARS
289 : [slock] "r" (&(lock->slock)),
290 [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
291 : "memory", "cc");
292
293 smp_mb();
294}
295
296/* 1 - lock taken successfully */
297static inline int arch_spin_trylock(arch_spinlock_t *lock)
298{
299 unsigned int val, got_it = 0;
300 SCOND_FAIL_RETRY_VAR_DEF;
301
302 smp_mb();
303
304 __asm__ __volatile__(
305 "0: mov %[delay], 1 \n"
306 "1: llock %[val], [%[slock]] \n"
307 " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */
308 " scond %[LOCKED], [%[slock]] \n" /* acquire */
309 " bz.d 4f \n"
310 " mov.z %[got_it], 1 \n" /* got it */
311 " \n"
312 SCOND_FAIL_RETRY_ASM
313
314 : [val] "=&r" (val),
315 [got_it] "+&r" (got_it)
316 SCOND_FAIL_RETRY_VARS
317 : [slock] "r" (&(lock->slock)),
318 [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
319 : "memory", "cc");
320
321 smp_mb();
322
323 return got_it;
324}
325
326static inline void arch_spin_unlock(arch_spinlock_t *lock)
327{
328 smp_mb();
329
330 lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
331
332 smp_mb();
333}
334
335/*
336 * Read-write spinlocks, allowing multiple readers but only one writer.
337 * Unfair locking as Writers could be starved indefinitely by Reader(s)
338 */
339
340static inline void arch_read_lock(arch_rwlock_t *rw)
341{
342 unsigned int val;
343 SCOND_FAIL_RETRY_VAR_DEF;
344
345 smp_mb();
346
347 /*
348 * zero means writer holds the lock exclusively, deny Reader.
349 * Otherwise grant lock to first/subseq reader
350 *
351 * if (rw->counter > 0) {
352 * rw->counter--;
353 * ret = 1;
354 * }
355 */
356
357 __asm__ __volatile__(
358 "0: mov %[delay], 1 \n"
359 "1: llock %[val], [%[rwlock]] \n"
360 " brls %[val], %[WR_LOCKED], 0b\n" /* <= 0: spin while write locked */
361 " sub %[val], %[val], 1 \n" /* reader lock */
362 " scond %[val], [%[rwlock]] \n"
363 " bz 4f \n" /* done */
364 " \n"
365 SCOND_FAIL_RETRY_ASM
366
367 : [val] "=&r" (val)
368 SCOND_FAIL_RETRY_VARS
369 : [rwlock] "r" (&(rw->counter)),
370 [WR_LOCKED] "ir" (0)
371 : "memory", "cc");
372
373 smp_mb();
374}
375
376/* 1 - lock taken successfully */
377static inline int arch_read_trylock(arch_rwlock_t *rw)
378{
379 unsigned int val, got_it = 0;
380 SCOND_FAIL_RETRY_VAR_DEF;
381
382 smp_mb();
383
384 __asm__ __volatile__(
385 "0: mov %[delay], 1 \n"
386 "1: llock %[val], [%[rwlock]] \n"
387 " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */
388 " sub %[val], %[val], 1 \n" /* counter-- */
389 " scond %[val], [%[rwlock]] \n"
390 " bz.d 4f \n"
391 " mov.z %[got_it], 1 \n" /* got it */
392 " \n"
393 SCOND_FAIL_RETRY_ASM
394
395 : [val] "=&r" (val),
396 [got_it] "+&r" (got_it)
397 SCOND_FAIL_RETRY_VARS
398 : [rwlock] "r" (&(rw->counter)),
399 [WR_LOCKED] "ir" (0)
400 : "memory", "cc");
401
402 smp_mb();
403
404 return got_it;
405}
406
407static inline void arch_write_lock(arch_rwlock_t *rw)
408{
409 unsigned int val;
410 SCOND_FAIL_RETRY_VAR_DEF;
411
412 smp_mb();
413
414 /*
415 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
416 * deny writer. Otherwise if unlocked grant to writer
417 * Hence the claim that Linux rwlocks are unfair to writers.
418 * (can be starved for an indefinite time by readers).
419 *
420 * if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
421 * rw->counter = 0;
422 * ret = 1;
423 * }
424 */
425
426 __asm__ __volatile__(
427 "0: mov %[delay], 1 \n"
428 "1: llock %[val], [%[rwlock]] \n"
429 " brne %[val], %[UNLOCKED], 0b \n" /* while !UNLOCKED spin */
430 " mov %[val], %[WR_LOCKED] \n"
431 " scond %[val], [%[rwlock]] \n"
432 " bz 4f \n"
433 " \n"
434 SCOND_FAIL_RETRY_ASM
435
436 : [val] "=&r" (val)
437 SCOND_FAIL_RETRY_VARS
438 : [rwlock] "r" (&(rw->counter)),
439 [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
440 [WR_LOCKED] "ir" (0)
441 : "memory", "cc");
442
443 smp_mb();
444}
445
446/* 1 - lock taken successfully */
447static inline int arch_write_trylock(arch_rwlock_t *rw)
448{
449 unsigned int val, got_it = 0;
450 SCOND_FAIL_RETRY_VAR_DEF;
451
452 smp_mb();
453
454 __asm__ __volatile__(
455 "0: mov %[delay], 1 \n"
456 "1: llock %[val], [%[rwlock]] \n"
457 " brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */
458 " mov %[val], %[WR_LOCKED] \n"
459 " scond %[val], [%[rwlock]] \n"
460 " bz.d 4f \n"
461 " mov.z %[got_it], 1 \n" /* got it */
462 " \n"
463 SCOND_FAIL_RETRY_ASM
464
465 : [val] "=&r" (val),
466 [got_it] "+&r" (got_it)
467 SCOND_FAIL_RETRY_VARS
468 : [rwlock] "r" (&(rw->counter)),
469 [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
470 [WR_LOCKED] "ir" (0)
471 : "memory", "cc");
472
473 smp_mb();
474
475 return got_it;
476}
477
478static inline void arch_read_unlock(arch_rwlock_t *rw)
479{
480 unsigned int val;
481
482 smp_mb();
483
484 /*
485 * rw->counter++;
486 */
487 __asm__ __volatile__(
488 "1: llock %[val], [%[rwlock]] \n"
489 " add %[val], %[val], 1 \n"
490 " scond %[val], [%[rwlock]] \n"
491 " bnz 1b \n"
492 " \n"
493 : [val] "=&r" (val)
494 : [rwlock] "r" (&(rw->counter))
495 : "memory", "cc");
496
497 smp_mb();
498}
499
500static inline void arch_write_unlock(arch_rwlock_t *rw)
501{
502 unsigned int val;
503
504 smp_mb();
505
506 /*
507 * rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
508 */
509 __asm__ __volatile__(
510 "1: llock %[val], [%[rwlock]] \n"
511 " scond %[UNLOCKED], [%[rwlock]]\n"
512 " bnz 1b \n"
513 " \n"
514 : [val] "=&r" (val)
515 : [rwlock] "r" (&(rw->counter)),
516 [UNLOCKED] "r" (__ARCH_RW_LOCK_UNLOCKED__)
517 : "memory", "cc");
518
519 smp_mb();
520}
521
522#undef SCOND_FAIL_RETRY_VAR_DEF
523#undef SCOND_FAIL_RETRY_ASM
524#undef SCOND_FAIL_RETRY_VARS
525
526#endif /* CONFIG_ARC_STAR_9000923308 */
527
528#else /* !CONFIG_ARC_HAS_LLSC */ 236#else /* !CONFIG_ARC_HAS_LLSC */
529 237
530static inline void arch_spin_lock(arch_spinlock_t *lock) 238static inline void arch_spin_lock(arch_spinlock_t *lock)
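The block removed above was the ARC_STAR_9000923308 livelock workaround: each failed scond deferred the retry by a busy-wait whose length doubled ("rol %[delay], %[delay]") until the coherency pipeline drained. A sketch of that exponential-backoff idea on a plain test-and-set lock, using GCC __atomic builtins (backoff_spin_lock is a hypothetical helper, not the kernel implementation):

/* Back off exponentially between failed acquire attempts: spin for
 * 'delay' iterations, then double it, so heavy contention stops
 * hammering the coherency fabric. */
static void backoff_spin_lock(volatile int *lock)
{
        unsigned int delay = 1;

        while (__atomic_exchange_n(lock, 1, __ATOMIC_ACQUIRE)) {
                for (unsigned int i = 0; i < delay; i++)
                        __asm__ volatile("" ::: "memory");      /* busy wait */
                delay <<= 1;                                    /* delay *= 2 */
        }
}

static void backoff_spin_unlock(volatile int *lock)
{
        __atomic_store_n(lock, 0, __ATOMIC_RELEASE);
}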
diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h
index 3af67455659a..2d79e527fa50 100644
--- a/arch/arc/include/asm/thread_info.h
+++ b/arch/arc/include/asm/thread_info.h
@@ -103,7 +103,7 @@ static inline __attribute_const__ struct thread_info *current_thread_info(void)
103 103
104/* 104/*
105 * _TIF_ALLWORK_MASK includes SYSCALL_TRACE, but we don't need it. 105 * _TIF_ALLWORK_MASK includes SYSCALL_TRACE, but we don't need it.
106 * SYSCALL_TRACE is anways separately/unconditionally tested right after a 106 * SYSCALL_TRACE is anyway separately/unconditionally tested right after a
107 * syscall, so all that remains to be tested is _TIF_WORK_MASK 107 * syscall, so all that remains to be tested is _TIF_WORK_MASK
108 */ 108 */
109 109
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index d1da6032b715..a78d5670884f 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -32,7 +32,7 @@
32#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) 32#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
33 33
34/* 34/*
35 * Algorthmically, for __user_ok() we want to do: 35 * Algorithmically, for __user_ok() we want to do:
36 * (start < TASK_SIZE) && (start+len < TASK_SIZE) 36 * (start < TASK_SIZE) && (start+len < TASK_SIZE)
37 * where TASK_SIZE could either be retrieved from thread_info->addr_limit or 37 * where TASK_SIZE could either be retrieved from thread_info->addr_limit or
38 * emitted directly in code. 38 * emitted directly in code.
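Written out, the check the comment describes is a two-sided range test. A sketch in plain C (TASK_SIZE_SKETCH is a stand-in; as the comment says, the real limit comes from thread_info->addr_limit or is emitted directly):

#include <stdbool.h>

#define TASK_SIZE_SKETCH        0x80000000UL

/* start and start+len must both fall below TASK_SIZE; the
 * subtraction form sidesteps overflow in 'start + len'. */
static inline bool user_range_ok(unsigned long start, unsigned long len)
{
        return start < TASK_SIZE_SKETCH &&
               len < TASK_SIZE_SKETCH - start;
}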
diff --git a/arch/arc/include/uapi/asm/swab.h b/arch/arc/include/uapi/asm/swab.h
index 095599a73195..71f3918b0fc3 100644
--- a/arch/arc/include/uapi/asm/swab.h
+++ b/arch/arc/include/uapi/asm/swab.h
@@ -74,7 +74,7 @@
74 __tmp ^ __in; \ 74 __tmp ^ __in; \
75}) 75})
76 76
77#elif (ARC_BSWAP_TYPE == 2) /* Custom single cycle bwap instruction */ 77#elif (ARC_BSWAP_TYPE == 2) /* Custom single cycle bswap instruction */
78 78
79#define __arch_swab32(x) \ 79#define __arch_swab32(x) \
80({ \ 80({ \
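This variant maps __arch_swab32() onto the custom single-cycle swap instruction; for reference, the generic shift-and-mask byte swap it replaces looks like this (a sketch, not the kernel's fallback verbatim):

#include <stdint.h>

/* 0xAABBCCDD -> 0xDDCCBBAA */
static inline uint32_t swab32_generic(uint32_t x)
{
        return ((x & 0x000000ffU) << 24) |
               ((x & 0x0000ff00U) <<  8) |
               ((x & 0x00ff0000U) >>  8) |
               ((x & 0xff000000U) >> 24);
}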
diff --git a/arch/arc/kernel/entry-compact.S b/arch/arc/kernel/entry-compact.S
index 0cb0abaa0479..98812c1248df 100644
--- a/arch/arc/kernel/entry-compact.S
+++ b/arch/arc/kernel/entry-compact.S
@@ -91,27 +91,13 @@ VECTOR mem_service ; 0x8, Mem exception (0x1)
91VECTOR instr_service ; 0x10, Instrn Error (0x2) 91VECTOR instr_service ; 0x10, Instrn Error (0x2)
92 92
93; ******************** Device ISRs ********************** 93; ******************** Device ISRs **********************
94#ifdef CONFIG_ARC_IRQ3_LV2 94#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
95VECTOR handle_interrupt_level2
96#else
97VECTOR handle_interrupt_level1
98#endif
99
100VECTOR handle_interrupt_level1
101
102#ifdef CONFIG_ARC_IRQ5_LV2
103VECTOR handle_interrupt_level2
104#else
105VECTOR handle_interrupt_level1
106#endif
107
108#ifdef CONFIG_ARC_IRQ6_LV2
109VECTOR handle_interrupt_level2 95VECTOR handle_interrupt_level2
110#else 96#else
111VECTOR handle_interrupt_level1 97VECTOR handle_interrupt_level1
112#endif 98#endif
113 99
114.rept 25 100.rept 28
115VECTOR handle_interrupt_level1 ; Other devices 101VECTOR handle_interrupt_level1 ; Other devices
116.endr 102.endr
117 103
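A consistency check on the .rept change: the old table emitted four explicit device vectors (IRQ3 through IRQ6, three of them conditionally level-2) followed by .rept 25, while the new table emits one conditional vector followed by .rept 28, so both layouts provide 29 device vectors.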
diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c
index c5cceca36118..ce9deb953ca9 100644
--- a/arch/arc/kernel/intc-compact.c
+++ b/arch/arc/kernel/intc-compact.c
@@ -28,10 +28,8 @@ void arc_init_IRQ(void)
28{ 28{
29 int level_mask = 0; 29 int level_mask = 0;
30 30
31 /* setup any high priority Interrupts (Level2 in ARCompact jargon) */ 31 /* Is timer high priority Interrupt (Level2 in ARCompact jargon) */
32 level_mask |= IS_ENABLED(CONFIG_ARC_IRQ3_LV2) << 3; 32 level_mask |= IS_ENABLED(CONFIG_ARC_COMPACT_IRQ_LEVELS) << TIMER0_IRQ;
33 level_mask |= IS_ENABLED(CONFIG_ARC_IRQ5_LV2) << 5;
34 level_mask |= IS_ENABLED(CONFIG_ARC_IRQ6_LV2) << 6;
35 33
36 /* 34 /*
37 * Write to register, even if no LV2 IRQs configured to reset it 35 * Write to register, even if no LV2 IRQs configured to reset it
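IS_ENABLED(CONFIG_...) folds to a compile-time 0 or 1, so shifting it by the IRQ number either sets that bit in the level mask or contributes nothing, with no #ifdef needed. A sketch with stand-in values (treat the numbers as illustrative):

/* config enabled -> bit TIMER0_IRQ set; disabled -> mask stays 0 */
#define TIMER0_IRQ_SKETCH       3
#define LV2_ENABLED_SKETCH      1       /* what IS_ENABLED() yields when set */

static unsigned int level_mask_sketch =
        LV2_ENABLED_SKETCH << TIMER0_IRQ_SKETCH;        /* 0x8 */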
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index 6fd48021324b..08f03d9b5b3e 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -108,7 +108,7 @@ static void arc_perf_event_update(struct perf_event *event,
108 int64_t delta = new_raw_count - prev_raw_count; 108 int64_t delta = new_raw_count - prev_raw_count;
109 109
110 /* 110 /*
111 * We don't afaraid of hwc->prev_count changing beneath our feet 111 * We aren't afraid of hwc->prev_count changing beneath our feet
112 * because there's no way for us to re-enter this function anytime. 112 * because there's no way for us to re-enter this function anytime.
113 */ 113 */
114 local64_set(&hwc->prev_count, new_raw_count); 114 local64_set(&hwc->prev_count, new_raw_count);
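The pattern above is the standard free-running-counter update: snapshot the raw hardware count, take the signed delta against the previous snapshot, and fold it into the 64-bit event total. A condensed sketch (names are illustrative):

#include <stdint.h>

static void update_count_sketch(uint64_t *prev, uint64_t *total,
                                uint64_t new_raw)
{
        int64_t delta = (int64_t)(new_raw - *prev);     /* unsigned math
                                                           handles wrap */
        *prev = new_raw;
        *total += delta;
}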
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index f63b8bfefb0c..2ee7a4d758a8 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -392,7 +392,7 @@ void __init setup_arch(char **cmdline_p)
392 /* 392 /*
393 * If we are here, it is established that @uboot_arg didn't 393 * If we are here, it is established that @uboot_arg didn't
394 * point to DT blob. Instead if u-boot says it is cmdline, 394 * point to DT blob. Instead if u-boot says it is cmdline,
395 * Appent to embedded DT cmdline. 395 * append to embedded DT cmdline.
396 * setup_machine_fdt() would have populated @boot_command_line 396 * setup_machine_fdt() would have populated @boot_command_line
397 */ 397 */
398 if (uboot_tag == 1) { 398 if (uboot_tag == 1) {
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index 004b7f0bc76c..6cb3736b6b83 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -34,7 +34,7 @@
34 * -ViXS were still seeing crashes when using insmod to load drivers. 34 * -ViXS were still seeing crashes when using insmod to load drivers.
35 * It turned out that the code to change Execute permssions for TLB entries 35 * It turned out that the code to change Execute permssions for TLB entries
36 * of user was not guarded for interrupts (mod_tlb_permission) 36 * of user was not guarded for interrupts (mod_tlb_permission)
37 * This was cauing TLB entries to be overwritten on unrelated indexes 37 * This was causing TLB entries to be overwritten on unrelated indexes
38 * 38 *
39 * Vineetg: July 15th 2008: Bug #94183 39 * Vineetg: July 15th 2008: Bug #94183
40 * -Exception happens in Delay slot of a JMP, and before user space resumes, 40 * -Exception happens in Delay slot of a JMP, and before user space resumes,
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index a6f91e88ce36..934150e7ac48 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -276,7 +276,7 @@ static int tlb_stats_open(struct inode *inode, struct file *file)
276 return 0; 276 return 0;
277} 277}
278 278
279/* called on user read(): display the couters */ 279/* called on user read(): display the counters */
280static ssize_t tlb_stats_output(struct file *file, /* file descriptor */ 280static ssize_t tlb_stats_output(struct file *file, /* file descriptor */
281 char __user *user_buf, /* user buffer */ 281 char __user *user_buf, /* user buffer */
282 size_t len, /* length of buffer */ 282 size_t len, /* length of buffer */
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 9e5eddbb856f..5a294b2c3cb3 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -215,7 +215,7 @@ slc_chk:
215 * ------------------ 215 * ------------------
216 * This ver of MMU supports variable page sizes (1k-16k): although Linux will 216 * This ver of MMU supports variable page sizes (1k-16k): although Linux will
217 * only support 8k (default), 16k and 4k. 217 * only support 8k (default), 16k and 4k.
218 * However from hardware perspective, smaller page sizes aggrevate aliasing 218 * However from hardware perspective, smaller page sizes aggravate aliasing
219 * meaning more vaddr bits needed to disambiguate the cache-line-op ; 219 * meaning more vaddr bits needed to disambiguate the cache-line-op ;
220 * the existing scheme of piggybacking won't work for certain configurations. 220 * the existing scheme of piggybacking won't work for certain configurations.
 221 * Two new registers IC_PTAG and DC_PTAG introduced. 221 * Two new registers IC_PTAG and DC_PTAG introduced.
@@ -302,7 +302,7 @@ void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
302 302
303 /* 303 /*
304 * This is technically for MMU v4, using the MMU v3 programming model 304 * This is technically for MMU v4, using the MMU v3 programming model
305 * Special work for HS38 aliasing I-cache configuratino with PAE40 305 * Special work for HS38 aliasing I-cache configuration with PAE40
306 * - upper 8 bits of paddr need to be written into PTAG_HI 306 * - upper 8 bits of paddr need to be written into PTAG_HI
307 * - (and needs to be written before the lower 32 bits) 307 * - (and needs to be written before the lower 32 bits)
308 * Note that PTAG_HI is hoisted outside the line loop 308 * Note that PTAG_HI is hoisted outside the line loop
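The hoisting noted above is the key PAE40 detail: PTAG_HI carries the upper 8 address bits and must be programmed before the 32-bit tag writes inside the loop. Shape of the loop, sketched with illustrative register and flag names:

	/* sketch: aliasing I-cache op with PAE40, MMU v3/v4 model */
	if (is_pae40_enabled() && op == OP_INV_IC)
		write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);

	while (num_lines-- > 0) {
		write_aux_reg(aux_tag, paddr);	/* low 32 bits of phys tag */
		write_aux_reg(aux_cmd, vaddr);	/* per-line op keyed by vaddr */
		paddr += L1_CACHE_BYTES;
		vaddr += L1_CACHE_BYTES;
	}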
@@ -936,7 +936,7 @@ void arc_cache_init(void)
936 ic->ver, CONFIG_ARC_MMU_VER); 936 ic->ver, CONFIG_ARC_MMU_VER);
937 937
938 /* 938 /*
939 * In MMU v4 (HS38x) the alising icache config uses IVIL/PTAG 939 * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
940 * pair to provide vaddr/paddr respectively, just as in MMU v3 940 * pair to provide vaddr/paddr respectively, just as in MMU v3
941 */ 941 */
942 if (is_isa_arcv2() && ic->alias) 942 if (is_isa_arcv2() && ic->alias)
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 8c8e36fa5659..73d7e4c75b7d 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -10,7 +10,7 @@
10 * DMA Coherent API Notes 10 * DMA Coherent API Notes
11 * 11 *
12 * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is 12 * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
13 * implemented by accessintg it using a kernel virtual address, with 13 * implemented by accessing it using a kernel virtual address, with
14 * Cache bit off in the TLB entry. 14 * Cache bit off in the TLB entry.
15 * 15 *
16 * The default DMA address == Phy address which is 0x8000_0000 based. 16 * The default DMA address == Phy address which is 0x8000_0000 based.
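As a sketch of the scheme this note describes (the helper shown is illustrative, not the file's actual entry point): a coherent buffer is ordinary memory reached through an uncached kernel mapping, with the DMA handle being the physical address.

	static void *dma_alloc_sketch(size_t size, dma_addr_t *handle)
	{
		struct page *page = alloc_pages(GFP_KERNEL, get_order(size));

		if (!page)
			return NULL;
		*handle = page_to_phys(page);		/* DMA addr == phys addr */
		return ioremap_nocache(*handle, size);	/* VA with cache bit off */
	}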
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index 06b6c2d695bf..414b42710a36 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -741,6 +741,7 @@ dtb-$(CONFIG_MACH_SUN7I) += \
741 sun7i-a20-olimex-som-evb.dtb \ 741 sun7i-a20-olimex-som-evb.dtb \
742 sun7i-a20-olinuxino-lime.dtb \ 742 sun7i-a20-olinuxino-lime.dtb \
743 sun7i-a20-olinuxino-lime2.dtb \ 743 sun7i-a20-olinuxino-lime2.dtb \
744 sun7i-a20-olinuxino-lime2-emmc.dtb \
744 sun7i-a20-olinuxino-micro.dtb \ 745 sun7i-a20-olinuxino-micro.dtb \
745 sun7i-a20-orangepi.dtb \ 746 sun7i-a20-orangepi.dtb \
746 sun7i-a20-orangepi-mini.dtb \ 747 sun7i-a20-orangepi-mini.dtb \
diff --git a/arch/arm/boot/dts/am437x-sk-evm.dts b/arch/arm/boot/dts/am437x-sk-evm.dts
index d82dd6e3f9b1..5687d6b4da60 100644
--- a/arch/arm/boot/dts/am437x-sk-evm.dts
+++ b/arch/arm/boot/dts/am437x-sk-evm.dts
@@ -418,7 +418,7 @@
418 status = "okay"; 418 status = "okay";
419 pinctrl-names = "default"; 419 pinctrl-names = "default";
420 pinctrl-0 = <&i2c0_pins>; 420 pinctrl-0 = <&i2c0_pins>;
421 clock-frequency = <400000>; 421 clock-frequency = <100000>;
422 422
423 tps@24 { 423 tps@24 {
424 compatible = "ti,tps65218"; 424 compatible = "ti,tps65218";
diff --git a/arch/arm/boot/dts/am57xx-idk-common.dtsi b/arch/arm/boot/dts/am57xx-idk-common.dtsi
index b01a5948cdd0..0e63b9dff6e7 100644
--- a/arch/arm/boot/dts/am57xx-idk-common.dtsi
+++ b/arch/arm/boot/dts/am57xx-idk-common.dtsi
@@ -60,10 +60,26 @@
60 60
61 tps659038_pmic { 61 tps659038_pmic {
62 compatible = "ti,tps659038-pmic"; 62 compatible = "ti,tps659038-pmic";
63
64 smps12-in-supply = <&vmain>;
65 smps3-in-supply = <&vmain>;
66 smps45-in-supply = <&vmain>;
67 smps6-in-supply = <&vmain>;
68 smps7-in-supply = <&vmain>;
69 smps8-in-supply = <&vmain>;
70 smps9-in-supply = <&vmain>;
71 ldo1-in-supply = <&vmain>;
72 ldo2-in-supply = <&vmain>;
73 ldo3-in-supply = <&vmain>;
74 ldo4-in-supply = <&vmain>;
75 ldo9-in-supply = <&vmain>;
76 ldoln-in-supply = <&vmain>;
77 ldousb-in-supply = <&vmain>;
78 ldortc-in-supply = <&vmain>;
79
63 regulators { 80 regulators {
64 smps12_reg: smps12 { 81 smps12_reg: smps12 {
65 /* VDD_MPU */ 82 /* VDD_MPU */
66 vin-supply = <&vmain>;
67 regulator-name = "smps12"; 83 regulator-name = "smps12";
68 regulator-min-microvolt = <850000>; 84 regulator-min-microvolt = <850000>;
69 regulator-max-microvolt = <1250000>; 85 regulator-max-microvolt = <1250000>;
@@ -73,7 +89,6 @@
73 89
74 smps3_reg: smps3 { 90 smps3_reg: smps3 {
75 /* VDD_DDR EMIF1 EMIF2 */ 91 /* VDD_DDR EMIF1 EMIF2 */
76 vin-supply = <&vmain>;
77 regulator-name = "smps3"; 92 regulator-name = "smps3";
78 regulator-min-microvolt = <1350000>; 93 regulator-min-microvolt = <1350000>;
79 regulator-max-microvolt = <1350000>; 94 regulator-max-microvolt = <1350000>;
@@ -84,7 +99,6 @@
84 smps45_reg: smps45 { 99 smps45_reg: smps45 {
85 /* VDD_DSPEVE on AM572 */ 100 /* VDD_DSPEVE on AM572 */
86 /* VDD_IVA + VDD_DSP on AM571 */ 101 /* VDD_IVA + VDD_DSP on AM571 */
87 vin-supply = <&vmain>;
88 regulator-name = "smps45"; 102 regulator-name = "smps45";
89 regulator-min-microvolt = <850000>; 103 regulator-min-microvolt = <850000>;
90 regulator-max-microvolt = <1250000>; 104 regulator-max-microvolt = <1250000>;
@@ -94,7 +108,6 @@
94 108
95 smps6_reg: smps6 { 109 smps6_reg: smps6 {
96 /* VDD_GPU */ 110 /* VDD_GPU */
97 vin-supply = <&vmain>;
98 regulator-name = "smps6"; 111 regulator-name = "smps6";
99 regulator-min-microvolt = <850000>; 112 regulator-min-microvolt = <850000>;
100 regulator-max-microvolt = <1250000>; 113 regulator-max-microvolt = <1250000>;
@@ -104,7 +117,6 @@
104 117
105 smps7_reg: smps7 { 118 smps7_reg: smps7 {
106 /* VDD_CORE */ 119 /* VDD_CORE */
107 vin-supply = <&vmain>;
108 regulator-name = "smps7"; 120 regulator-name = "smps7";
109 regulator-min-microvolt = <850000>; 121 regulator-min-microvolt = <850000>;
110 regulator-max-microvolt = <1150000>; 122 regulator-max-microvolt = <1150000>;
@@ -115,13 +127,11 @@
115 smps8_reg: smps8 { 127 smps8_reg: smps8 {
116 /* 5728 - VDD_IVAHD */ 128 /* 5728 - VDD_IVAHD */
117 /* 5718 - N.C. test point */ 129 /* 5718 - N.C. test point */
118 vin-supply = <&vmain>;
119 regulator-name = "smps8"; 130 regulator-name = "smps8";
120 }; 131 };
121 132
122 smps9_reg: smps9 { 133 smps9_reg: smps9 {
123 /* VDD_3_3D */ 134 /* VDD_3_3D */
124 vin-supply = <&vmain>;
125 regulator-name = "smps9"; 135 regulator-name = "smps9";
126 regulator-min-microvolt = <3300000>; 136 regulator-min-microvolt = <3300000>;
127 regulator-max-microvolt = <3300000>; 137 regulator-max-microvolt = <3300000>;
@@ -132,7 +142,6 @@
132 ldo1_reg: ldo1 { 142 ldo1_reg: ldo1 {
133 /* VDDSHV8 - VSDMMC */ 143 /* VDDSHV8 - VSDMMC */
134 /* NOTE: on rev 1.3a, data supply */ 144 /* NOTE: on rev 1.3a, data supply */
135 vin-supply = <&vmain>;
136 regulator-name = "ldo1"; 145 regulator-name = "ldo1";
137 regulator-min-microvolt = <1800000>; 146 regulator-min-microvolt = <1800000>;
138 regulator-max-microvolt = <3300000>; 147 regulator-max-microvolt = <3300000>;
@@ -142,7 +151,6 @@
142 151
143 ldo2_reg: ldo2 { 152 ldo2_reg: ldo2 {
144 /* VDDSH18V */ 153 /* VDDSH18V */
145 vin-supply = <&vmain>;
146 regulator-name = "ldo2"; 154 regulator-name = "ldo2";
147 regulator-min-microvolt = <1800000>; 155 regulator-min-microvolt = <1800000>;
148 regulator-max-microvolt = <1800000>; 156 regulator-max-microvolt = <1800000>;
@@ -152,7 +160,6 @@
152 160
153 ldo3_reg: ldo3 { 161 ldo3_reg: ldo3 {
154 /* R1.3a 572x V1_8PHY_LDO3: USB, SATA */ 162 /* R1.3a 572x V1_8PHY_LDO3: USB, SATA */
155 vin-supply = <&vmain>;
156 regulator-name = "ldo3"; 163 regulator-name = "ldo3";
157 regulator-min-microvolt = <1800000>; 164 regulator-min-microvolt = <1800000>;
158 regulator-max-microvolt = <1800000>; 165 regulator-max-microvolt = <1800000>;
@@ -162,7 +169,6 @@
162 169
163 ldo4_reg: ldo4 { 170 ldo4_reg: ldo4 {
164 /* R1.3a 572x V1_8PHY_LDO4: PCIE, HDMI*/ 171 /* R1.3a 572x V1_8PHY_LDO4: PCIE, HDMI*/
165 vin-supply = <&vmain>;
166 regulator-name = "ldo4"; 172 regulator-name = "ldo4";
167 regulator-min-microvolt = <1800000>; 173 regulator-min-microvolt = <1800000>;
168 regulator-max-microvolt = <1800000>; 174 regulator-max-microvolt = <1800000>;
@@ -174,7 +180,6 @@
174 180
175 ldo9_reg: ldo9 { 181 ldo9_reg: ldo9 {
176 /* VDD_RTC */ 182 /* VDD_RTC */
177 vin-supply = <&vmain>;
178 regulator-name = "ldo9"; 183 regulator-name = "ldo9";
179 regulator-min-microvolt = <840000>; 184 regulator-min-microvolt = <840000>;
180 regulator-max-microvolt = <1160000>; 185 regulator-max-microvolt = <1160000>;
@@ -184,7 +189,6 @@
184 189
185 ldoln_reg: ldoln { 190 ldoln_reg: ldoln {
186 /* VDDA_1V8_PLL */ 191 /* VDDA_1V8_PLL */
187 vin-supply = <&vmain>;
188 regulator-name = "ldoln"; 192 regulator-name = "ldoln";
189 regulator-min-microvolt = <1800000>; 193 regulator-min-microvolt = <1800000>;
190 regulator-max-microvolt = <1800000>; 194 regulator-max-microvolt = <1800000>;
@@ -194,7 +198,6 @@
194 198
195 ldousb_reg: ldousb { 199 ldousb_reg: ldousb {
196 /* VDDA_3V_USB: VDDA_USBHS33 */ 200 /* VDDA_3V_USB: VDDA_USBHS33 */
197 vin-supply = <&vmain>;
198 regulator-name = "ldousb"; 201 regulator-name = "ldousb";
199 regulator-min-microvolt = <3300000>; 202 regulator-min-microvolt = <3300000>;
200 regulator-max-microvolt = <3300000>; 203 regulator-max-microvolt = <3300000>;
@@ -204,7 +207,6 @@
204 207
205 ldortc_reg: ldortc { 208 ldortc_reg: ldortc {
206 /* VDDA_RTC */ 209 /* VDDA_RTC */
207 vin-supply = <&vmain>;
208 regulator-name = "ldortc"; 210 regulator-name = "ldortc";
209 regulator-min-microvolt = <1800000>; 211 regulator-min-microvolt = <1800000>;
210 regulator-max-microvolt = <1800000>; 212 regulator-max-microvolt = <1800000>;
diff --git a/arch/arm/boot/dts/dm8148-evm.dts b/arch/arm/boot/dts/dm8148-evm.dts
index cbc17b0794b1..4128fa91823c 100644
--- a/arch/arm/boot/dts/dm8148-evm.dts
+++ b/arch/arm/boot/dts/dm8148-evm.dts
@@ -93,6 +93,10 @@
93 }; 93 };
94}; 94};
95 95
96&mmc1 {
97 status = "disabled";
98};
99
96&mmc2 { 100&mmc2 {
97 pinctrl-names = "default"; 101 pinctrl-names = "default";
98 pinctrl-0 = <&sd1_pins>; 102 pinctrl-0 = <&sd1_pins>;
@@ -101,6 +105,10 @@
101 cd-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>; 105 cd-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
102}; 106};
103 107
108&mmc3 {
109 status = "disabled";
110};
111
104&pincntl { 112&pincntl {
105 sd1_pins: pinmux_sd1_pins { 113 sd1_pins: pinmux_sd1_pins {
106 pinctrl-single,pins = < 114 pinctrl-single,pins = <
diff --git a/arch/arm/boot/dts/dm8148-t410.dts b/arch/arm/boot/dts/dm8148-t410.dts
index 5d4313fd5a46..3f184863e0c5 100644
--- a/arch/arm/boot/dts/dm8148-t410.dts
+++ b/arch/arm/boot/dts/dm8148-t410.dts
@@ -45,6 +45,14 @@
45 phy-mode = "rgmii"; 45 phy-mode = "rgmii";
46}; 46};
47 47
48&mmc1 {
49 status = "disabled";
50};
51
52&mmc2 {
53 status = "disabled";
54};
55
48&mmc3 { 56&mmc3 {
49 pinctrl-names = "default"; 57 pinctrl-names = "default";
50 pinctrl-0 = <&sd2_pins>; 58 pinctrl-0 = <&sd2_pins>;
@@ -53,6 +61,7 @@
53 dmas = <&edma_xbar 8 0 1 /* use SDTXEVT1 instead of MCASP0TX */ 61 dmas = <&edma_xbar 8 0 1 /* use SDTXEVT1 instead of MCASP0TX */
54 &edma_xbar 9 0 2>; /* use SDRXEVT1 instead of MCASP0RX */ 62 &edma_xbar 9 0 2>; /* use SDRXEVT1 instead of MCASP0RX */
55 dma-names = "tx", "rx"; 63 dma-names = "tx", "rx";
64 non-removable;
56}; 65};
57 66
58&pincntl { 67&pincntl {
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index e0074014385a..3a8f3976f6f9 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -1451,6 +1451,8 @@
1451 ti,hwmods = "gpmc"; 1451 ti,hwmods = "gpmc";
1452 reg = <0x50000000 0x37c>; /* device IO registers */ 1452 reg = <0x50000000 0x37c>; /* device IO registers */
1453 interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>; 1453 interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
1454 dmas = <&edma_xbar 4 0>;
1455 dma-names = "rxtx";
1454 gpmc,num-cs = <8>; 1456 gpmc,num-cs = <8>;
1455 gpmc,num-waitpins = <2>; 1457 gpmc,num-waitpins = <2>;
1456 #address-cells = <2>; 1458 #address-cells = <2>;
diff --git a/arch/arm/boot/dts/dra74x.dtsi b/arch/arm/boot/dts/dra74x.dtsi
index 4220eeffc65a..5e06020f450b 100644
--- a/arch/arm/boot/dts/dra74x.dtsi
+++ b/arch/arm/boot/dts/dra74x.dtsi
@@ -107,8 +107,8 @@
107 reg = <0x58000000 0x80>, 107 reg = <0x58000000 0x80>,
108 <0x58004054 0x4>, 108 <0x58004054 0x4>,
109 <0x58004300 0x20>, 109 <0x58004300 0x20>,
110 <0x58005054 0x4>, 110 <0x58009054 0x4>,
111 <0x58005300 0x20>; 111 <0x58009300 0x20>;
112 reg-names = "dss", "pll1_clkctrl", "pll1", 112 reg-names = "dss", "pll1_clkctrl", "pll1",
113 "pll2_clkctrl", "pll2"; 113 "pll2_clkctrl", "pll2";
114 114
diff --git a/arch/arm/boot/dts/exynos5250-snow-common.dtsi b/arch/arm/boot/dts/exynos5250-snow-common.dtsi
index ddfe1f558c10..fa14f77df563 100644
--- a/arch/arm/boot/dts/exynos5250-snow-common.dtsi
+++ b/arch/arm/boot/dts/exynos5250-snow-common.dtsi
@@ -242,7 +242,7 @@
242 hpd-gpios = <&gpx0 7 GPIO_ACTIVE_HIGH>; 242 hpd-gpios = <&gpx0 7 GPIO_ACTIVE_HIGH>;
243 243
244 ports { 244 ports {
245 port0 { 245 port {
246 dp_out: endpoint { 246 dp_out: endpoint {
247 remote-endpoint = <&bridge_in>; 247 remote-endpoint = <&bridge_in>;
248 }; 248 };
@@ -485,13 +485,20 @@
485 edid-emulation = <5>; 485 edid-emulation = <5>;
486 486
487 ports { 487 ports {
488 port0 { 488 #address-cells = <1>;
489 #size-cells = <0>;
490
491 port@0 {
492 reg = <0>;
493
489 bridge_out: endpoint { 494 bridge_out: endpoint {
490 remote-endpoint = <&panel_in>; 495 remote-endpoint = <&panel_in>;
491 }; 496 };
492 }; 497 };
493 498
494 port1 { 499 port@1 {
500 reg = <1>;
501
495 bridge_in: endpoint { 502 bridge_in: endpoint {
496 remote-endpoint = <&dp_out>; 503 remote-endpoint = <&dp_out>;
497 }; 504 };
diff --git a/arch/arm/boot/dts/exynos5420-peach-pit.dts b/arch/arm/boot/dts/exynos5420-peach-pit.dts
index f9d2e4f1a0e0..1de972d46a87 100644
--- a/arch/arm/boot/dts/exynos5420-peach-pit.dts
+++ b/arch/arm/boot/dts/exynos5420-peach-pit.dts
@@ -163,7 +163,7 @@
163 hpd-gpios = <&gpx2 6 GPIO_ACTIVE_HIGH>; 163 hpd-gpios = <&gpx2 6 GPIO_ACTIVE_HIGH>;
164 164
165 ports { 165 ports {
166 port0 { 166 port {
167 dp_out: endpoint { 167 dp_out: endpoint {
168 remote-endpoint = <&bridge_in>; 168 remote-endpoint = <&bridge_in>;
169 }; 169 };
@@ -631,13 +631,20 @@
631 use-external-pwm; 631 use-external-pwm;
632 632
633 ports { 633 ports {
634 port0 { 634 #address-cells = <1>;
635 #size-cells = <0>;
636
637 port@0 {
638 reg = <0>;
639
635 bridge_out: endpoint { 640 bridge_out: endpoint {
636 remote-endpoint = <&panel_in>; 641 remote-endpoint = <&panel_in>;
637 }; 642 };
638 }; 643 };
639 644
640 port1 { 645 port@1 {
646 reg = <1>;
647
641 bridge_in: endpoint { 648 bridge_in: endpoint {
642 remote-endpoint = <&dp_out>; 649 remote-endpoint = <&dp_out>;
643 }; 650 };
diff --git a/arch/arm/boot/dts/omap3-evm-37xx.dts b/arch/arm/boot/dts/omap3-evm-37xx.dts
index 76056ba92ced..ed449827c3d3 100644
--- a/arch/arm/boot/dts/omap3-evm-37xx.dts
+++ b/arch/arm/boot/dts/omap3-evm-37xx.dts
@@ -85,7 +85,7 @@
85 OMAP3_CORE1_IOPAD(0x2158, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_clk.sdmmc2_clk */ 85 OMAP3_CORE1_IOPAD(0x2158, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_clk.sdmmc2_clk */
86 OMAP3_CORE1_IOPAD(0x215a, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_cmd.sdmmc2_cmd */ 86 OMAP3_CORE1_IOPAD(0x215a, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_cmd.sdmmc2_cmd */
87 OMAP3_CORE1_IOPAD(0x215c, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat0.sdmmc2_dat0 */ 87 OMAP3_CORE1_IOPAD(0x215c, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat0.sdmmc2_dat0 */
88 OMAP3_CORE1_IOPAD(0x215e, WAKEUP_EN | PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat1.sdmmc2_dat1 */ 88 OMAP3_CORE1_IOPAD(0x215e, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat1.sdmmc2_dat1 */
89 OMAP3_CORE1_IOPAD(0x2160, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat2.sdmmc2_dat2 */ 89 OMAP3_CORE1_IOPAD(0x2160, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat2.sdmmc2_dat2 */
90 OMAP3_CORE1_IOPAD(0x2162, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat3.sdmmc2_dat3 */ 90 OMAP3_CORE1_IOPAD(0x2162, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat3.sdmmc2_dat3 */
91 >; 91 >;
diff --git a/arch/arm/boot/dts/omap3-igep.dtsi b/arch/arm/boot/dts/omap3-igep.dtsi
index 41f5d386f21f..f4f2ce46d681 100644
--- a/arch/arm/boot/dts/omap3-igep.dtsi
+++ b/arch/arm/boot/dts/omap3-igep.dtsi
@@ -188,6 +188,7 @@
188 vmmc-supply = <&vmmc1>; 188 vmmc-supply = <&vmmc1>;
189 vmmc_aux-supply = <&vsim>; 189 vmmc_aux-supply = <&vsim>;
190 bus-width = <4>; 190 bus-width = <4>;
191 cd-gpios = <&twl_gpio 0 GPIO_ACTIVE_LOW>;
191}; 192};
192 193
193&mmc3 { 194&mmc3 {
diff --git a/arch/arm/boot/dts/omap3-igep0020-common.dtsi b/arch/arm/boot/dts/omap3-igep0020-common.dtsi
index d6f839cab649..b6971060648a 100644
--- a/arch/arm/boot/dts/omap3-igep0020-common.dtsi
+++ b/arch/arm/boot/dts/omap3-igep0020-common.dtsi
@@ -194,6 +194,12 @@
194 OMAP3630_CORE2_IOPAD(0x25f8, PIN_OUTPUT | MUX_MODE4) /* etk_d14.gpio_28 */ 194 OMAP3630_CORE2_IOPAD(0x25f8, PIN_OUTPUT | MUX_MODE4) /* etk_d14.gpio_28 */
195 >; 195 >;
196 }; 196 };
197
198 mmc1_wp_pins: pinmux_mmc1_cd_pins {
199 pinctrl-single,pins = <
200 OMAP3630_CORE2_IOPAD(0x25fa, PIN_INPUT | MUX_MODE4) /* etk_d15.gpio_29 */
201 >;
202 };
197}; 203};
198 204
199&i2c3 { 205&i2c3 {
@@ -250,3 +256,8 @@
250 }; 256 };
251 }; 257 };
252}; 258};
259
260&mmc1 {
261 pinctrl-0 = <&mmc1_pins &mmc1_wp_pins>;
262 wp-gpios = <&gpio1 29 GPIO_ACTIVE_LOW>; /* gpio_29 */
263};
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index d9e2d9c6e999..2b74a81d1de2 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -288,7 +288,7 @@
288 pinctrl-single,pins = < 288 pinctrl-single,pins = <
289 OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT_PULLUP | MUX_MODE1) /* ssi1_rdy_tx */ 289 OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT_PULLUP | MUX_MODE1) /* ssi1_rdy_tx */
290 OMAP3_CORE1_IOPAD(0x217e, PIN_OUTPUT | MUX_MODE1) /* ssi1_flag_tx */ 290 OMAP3_CORE1_IOPAD(0x217e, PIN_OUTPUT | MUX_MODE1) /* ssi1_flag_tx */
291 OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* ssi1_wake_tx (cawake) */ 291 OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | MUX_MODE4) /* ssi1_wake_tx (cawake) */
292 OMAP3_CORE1_IOPAD(0x217c, PIN_OUTPUT | MUX_MODE1) /* ssi1_dat_tx */ 292 OMAP3_CORE1_IOPAD(0x217c, PIN_OUTPUT | MUX_MODE1) /* ssi1_dat_tx */
293 OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT | MUX_MODE1) /* ssi1_dat_rx */ 293 OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT | MUX_MODE1) /* ssi1_dat_rx */
294 OMAP3_CORE1_IOPAD(0x2186, PIN_INPUT | MUX_MODE1) /* ssi1_flag_rx */ 294 OMAP3_CORE1_IOPAD(0x2186, PIN_INPUT | MUX_MODE1) /* ssi1_flag_rx */
@@ -300,7 +300,7 @@
300 modem_pins: pinmux_modem { 300 modem_pins: pinmux_modem {
301 pinctrl-single,pins = < 301 pinctrl-single,pins = <
302 OMAP3_CORE1_IOPAD(0x20dc, PIN_OUTPUT | MUX_MODE4) /* gpio 70 => cmt_apeslpx */ 302 OMAP3_CORE1_IOPAD(0x20dc, PIN_OUTPUT | MUX_MODE4) /* gpio 70 => cmt_apeslpx */
303 OMAP3_CORE1_IOPAD(0x20e0, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* gpio 72 => ape_rst_rq */ 303 OMAP3_CORE1_IOPAD(0x20e0, PIN_INPUT | MUX_MODE4) /* gpio 72 => ape_rst_rq */
304 OMAP3_CORE1_IOPAD(0x20e2, PIN_OUTPUT | MUX_MODE4) /* gpio 73 => cmt_rst_rq */ 304 OMAP3_CORE1_IOPAD(0x20e2, PIN_OUTPUT | MUX_MODE4) /* gpio 73 => cmt_rst_rq */
305 OMAP3_CORE1_IOPAD(0x20e4, PIN_OUTPUT | MUX_MODE4) /* gpio 74 => cmt_en */ 305 OMAP3_CORE1_IOPAD(0x20e4, PIN_OUTPUT | MUX_MODE4) /* gpio 74 => cmt_en */
306 OMAP3_CORE1_IOPAD(0x20e6, PIN_OUTPUT | MUX_MODE4) /* gpio 75 => cmt_rst */ 306 OMAP3_CORE1_IOPAD(0x20e6, PIN_OUTPUT | MUX_MODE4) /* gpio 75 => cmt_rst */
diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi
index a00ca761675d..927b17fc4ed8 100644
--- a/arch/arm/boot/dts/omap3-n950-n9.dtsi
+++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi
@@ -97,7 +97,7 @@
97 OMAP3_CORE1_IOPAD(0x217c, PIN_OUTPUT | MUX_MODE1) /* ssi1_dat_tx */ 97 OMAP3_CORE1_IOPAD(0x217c, PIN_OUTPUT | MUX_MODE1) /* ssi1_dat_tx */
98 OMAP3_CORE1_IOPAD(0x217e, PIN_OUTPUT | MUX_MODE1) /* ssi1_flag_tx */ 98 OMAP3_CORE1_IOPAD(0x217e, PIN_OUTPUT | MUX_MODE1) /* ssi1_flag_tx */
99 OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT_PULLUP | MUX_MODE1) /* ssi1_rdy_tx */ 99 OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT_PULLUP | MUX_MODE1) /* ssi1_rdy_tx */
100 OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* ssi1_wake_tx (cawake) */ 100 OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | MUX_MODE4) /* ssi1_wake_tx (cawake) */
101 OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT | MUX_MODE1) /* ssi1_dat_rx */ 101 OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT | MUX_MODE1) /* ssi1_dat_rx */
102 OMAP3_CORE1_IOPAD(0x2186, PIN_INPUT | MUX_MODE1) /* ssi1_flag_rx */ 102 OMAP3_CORE1_IOPAD(0x2186, PIN_INPUT | MUX_MODE1) /* ssi1_flag_rx */
103 OMAP3_CORE1_IOPAD(0x2188, PIN_OUTPUT | MUX_MODE1) /* ssi1_rdy_rx */ 103 OMAP3_CORE1_IOPAD(0x2188, PIN_OUTPUT | MUX_MODE1) /* ssi1_rdy_rx */
@@ -110,7 +110,7 @@
110 OMAP3_CORE1_IOPAD(0x217c, PIN_OUTPUT | MUX_MODE7) /* ssi1_dat_tx */ 110 OMAP3_CORE1_IOPAD(0x217c, PIN_OUTPUT | MUX_MODE7) /* ssi1_dat_tx */
111 OMAP3_CORE1_IOPAD(0x217e, PIN_OUTPUT | MUX_MODE7) /* ssi1_flag_tx */ 111 OMAP3_CORE1_IOPAD(0x217e, PIN_OUTPUT | MUX_MODE7) /* ssi1_flag_tx */
112 OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT_PULLDOWN | MUX_MODE7) /* ssi1_rdy_tx */ 112 OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT_PULLDOWN | MUX_MODE7) /* ssi1_rdy_tx */
113 OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* ssi1_wake_tx (cawake) */ 113 OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | MUX_MODE4) /* ssi1_wake_tx (cawake) */
114 OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT | MUX_MODE7) /* ssi1_dat_rx */ 114 OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT | MUX_MODE7) /* ssi1_dat_rx */
115 OMAP3_CORE1_IOPAD(0x2186, PIN_INPUT | MUX_MODE7) /* ssi1_flag_rx */ 115 OMAP3_CORE1_IOPAD(0x2186, PIN_INPUT | MUX_MODE7) /* ssi1_flag_rx */
116 OMAP3_CORE1_IOPAD(0x2188, PIN_OUTPUT | MUX_MODE4) /* ssi1_rdy_rx */ 116 OMAP3_CORE1_IOPAD(0x2188, PIN_OUTPUT | MUX_MODE4) /* ssi1_rdy_rx */
@@ -120,7 +120,7 @@
120 120
121 modem_pins1: pinmux_modem_core1_pins { 121 modem_pins1: pinmux_modem_core1_pins {
122 pinctrl-single,pins = < 122 pinctrl-single,pins = <
123 OMAP3_CORE1_IOPAD(0x207a, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* gpio_34 (ape_rst_rq) */ 123 OMAP3_CORE1_IOPAD(0x207a, PIN_INPUT | MUX_MODE4) /* gpio_34 (ape_rst_rq) */
124 OMAP3_CORE1_IOPAD(0x2100, PIN_OUTPUT | MUX_MODE4) /* gpio_88 (cmt_rst_rq) */ 124 OMAP3_CORE1_IOPAD(0x2100, PIN_OUTPUT | MUX_MODE4) /* gpio_88 (cmt_rst_rq) */
125 OMAP3_CORE1_IOPAD(0x210a, PIN_OUTPUT | MUX_MODE4) /* gpio_93 (cmt_apeslpx) */ 125 OMAP3_CORE1_IOPAD(0x210a, PIN_OUTPUT | MUX_MODE4) /* gpio_93 (cmt_apeslpx) */
126 >; 126 >;
diff --git a/arch/arm/boot/dts/omap3-zoom3.dts b/arch/arm/boot/dts/omap3-zoom3.dts
index f19170bdcc1f..c29b41dc7b95 100644
--- a/arch/arm/boot/dts/omap3-zoom3.dts
+++ b/arch/arm/boot/dts/omap3-zoom3.dts
@@ -98,7 +98,7 @@
98 pinctrl-single,pins = < 98 pinctrl-single,pins = <
99 OMAP3_CORE1_IOPAD(0x2174, PIN_INPUT_PULLUP | MUX_MODE0) /* uart2_cts.uart2_cts */ 99 OMAP3_CORE1_IOPAD(0x2174, PIN_INPUT_PULLUP | MUX_MODE0) /* uart2_cts.uart2_cts */
100 OMAP3_CORE1_IOPAD(0x2176, PIN_OUTPUT | MUX_MODE0) /* uart2_rts.uart2_rts */ 100 OMAP3_CORE1_IOPAD(0x2176, PIN_OUTPUT | MUX_MODE0) /* uart2_rts.uart2_rts */
101 OMAP3_CORE1_IOPAD(0x217a, WAKEUP_EN | PIN_INPUT | MUX_MODE0) /* uart2_rx.uart2_rx */ 101 OMAP3_CORE1_IOPAD(0x217a, PIN_INPUT | MUX_MODE0) /* uart2_rx.uart2_rx */
102 OMAP3_CORE1_IOPAD(0x2178, PIN_OUTPUT | MUX_MODE0) /* uart2_tx.uart2_tx */ 102 OMAP3_CORE1_IOPAD(0x2178, PIN_OUTPUT | MUX_MODE0) /* uart2_tx.uart2_tx */
103 >; 103 >;
104 }; 104 };
@@ -107,7 +107,7 @@
107 pinctrl-single,pins = < 107 pinctrl-single,pins = <
108 OMAP3_CORE1_IOPAD(0x219a, PIN_INPUT_PULLDOWN | MUX_MODE0) /* uart3_cts_rctx.uart3_cts_rctx */ 108 OMAP3_CORE1_IOPAD(0x219a, PIN_INPUT_PULLDOWN | MUX_MODE0) /* uart3_cts_rctx.uart3_cts_rctx */
109 OMAP3_CORE1_IOPAD(0x219c, PIN_OUTPUT | MUX_MODE0) /* uart3_rts_sd.uart3_rts_sd */ 109 OMAP3_CORE1_IOPAD(0x219c, PIN_OUTPUT | MUX_MODE0) /* uart3_rts_sd.uart3_rts_sd */
110 OMAP3_CORE1_IOPAD(0x219e, WAKEUP_EN | PIN_INPUT | MUX_MODE0) /* uart3_rx_irrx.uart3_rx_irrx */ 110 OMAP3_CORE1_IOPAD(0x219e, PIN_INPUT | MUX_MODE0) /* uart3_rx_irrx.uart3_rx_irrx */
111 OMAP3_CORE1_IOPAD(0x21a0, PIN_OUTPUT | MUX_MODE0) /* uart3_tx_irtx.uart3_tx_irtx */ 111 OMAP3_CORE1_IOPAD(0x21a0, PIN_OUTPUT | MUX_MODE0) /* uart3_tx_irtx.uart3_tx_irtx */
112 >; 112 >;
113 }; 113 };
@@ -125,7 +125,7 @@
125 pinctrl-single,pins = < 125 pinctrl-single,pins = <
126 OMAP3630_CORE2_IOPAD(0x25d8, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_clk.sdmmc3_clk */ 126 OMAP3630_CORE2_IOPAD(0x25d8, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_clk.sdmmc3_clk */
127 OMAP3630_CORE2_IOPAD(0x25e4, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d4.sdmmc3_dat0 */ 127 OMAP3630_CORE2_IOPAD(0x25e4, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d4.sdmmc3_dat0 */
128 OMAP3630_CORE2_IOPAD(0x25e6, WAKEUP_EN | PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d5.sdmmc3_dat1 */ 128 OMAP3630_CORE2_IOPAD(0x25e6, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d5.sdmmc3_dat1 */
129 OMAP3630_CORE2_IOPAD(0x25e8, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d6.sdmmc3_dat2 */ 129 OMAP3630_CORE2_IOPAD(0x25e8, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d6.sdmmc3_dat2 */
130 OMAP3630_CORE2_IOPAD(0x25e2, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d3.sdmmc3_dat3 */ 130 OMAP3630_CORE2_IOPAD(0x25e2, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d3.sdmmc3_dat3 */
131 >; 131 >;
diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi
index dc759a3028b7..5d5b620b7d9b 100644
--- a/arch/arm/boot/dts/omap5-board-common.dtsi
+++ b/arch/arm/boot/dts/omap5-board-common.dtsi
@@ -14,6 +14,29 @@
14 display0 = &hdmi0; 14 display0 = &hdmi0;
15 }; 15 };
16 16
17 vmain: fixedregulator-vmain {
18 compatible = "regulator-fixed";
19 regulator-name = "vmain";
20 regulator-min-microvolt = <5000000>;
21 regulator-max-microvolt = <5000000>;
22 };
23
24 vsys_cobra: fixedregulator-vsys_cobra {
25 compatible = "regulator-fixed";
26 regulator-name = "vsys_cobra";
27 vin-supply = <&vmain>;
28 regulator-min-microvolt = <5000000>;
29 regulator-max-microvolt = <5000000>;
30 };
31
32 vdds_1v8_main: fixedregulator-vdds_1v8_main {
33 compatible = "regulator-fixed";
34 regulator-name = "vdds_1v8_main";
35 vin-supply = <&smps7_reg>;
36 regulator-min-microvolt = <1800000>;
37 regulator-max-microvolt = <1800000>;
38 };
39
17 vmmcsd_fixed: fixedregulator-mmcsd { 40 vmmcsd_fixed: fixedregulator-mmcsd {
18 compatible = "regulator-fixed"; 41 compatible = "regulator-fixed";
19 regulator-name = "vmmcsd_fixed"; 42 regulator-name = "vmmcsd_fixed";
@@ -309,7 +332,7 @@
309 332
310 wlcore_irq_pin: pinmux_wlcore_irq_pin { 333 wlcore_irq_pin: pinmux_wlcore_irq_pin {
311 pinctrl-single,pins = < 334 pinctrl-single,pins = <
312 OMAP5_IOPAD(0x40, WAKEUP_EN | PIN_INPUT_PULLUP | MUX_MODE6) /* llia_wakereqin.gpio1_wk14 */ 335 OMAP5_IOPAD(0x40, PIN_INPUT_PULLUP | MUX_MODE6) /* llia_wakereqin.gpio1_wk14 */
313 >; 336 >;
314 }; 337 };
315}; 338};
@@ -409,6 +432,26 @@
409 432
410 ti,ldo6-vibrator; 433 ti,ldo6-vibrator;
411 434
435 smps123-in-supply = <&vsys_cobra>;
436 smps45-in-supply = <&vsys_cobra>;
437 smps6-in-supply = <&vsys_cobra>;
438 smps7-in-supply = <&vsys_cobra>;
439 smps8-in-supply = <&vsys_cobra>;
440 smps9-in-supply = <&vsys_cobra>;
441 smps10_out2-in-supply = <&vsys_cobra>;
442 smps10_out1-in-supply = <&vsys_cobra>;
443 ldo1-in-supply = <&vsys_cobra>;
444 ldo2-in-supply = <&vsys_cobra>;
445 ldo3-in-supply = <&vdds_1v8_main>;
446 ldo4-in-supply = <&vdds_1v8_main>;
447 ldo5-in-supply = <&vsys_cobra>;
448 ldo6-in-supply = <&vdds_1v8_main>;
449 ldo7-in-supply = <&vsys_cobra>;
450 ldo8-in-supply = <&vsys_cobra>;
451 ldo9-in-supply = <&vmmcsd_fixed>;
452 ldoln-in-supply = <&vsys_cobra>;
453 ldousb-in-supply = <&vsys_cobra>;
454
412 regulators { 455 regulators {
413 smps123_reg: smps123 { 456 smps123_reg: smps123 {
414 /* VDD_OPP_MPU */ 457 /* VDD_OPP_MPU */
@@ -600,7 +643,8 @@
600 pinctrl-0 = <&twl6040_pins>; 643 pinctrl-0 = <&twl6040_pins>;
601 644
602 interrupts = <GIC_SPI 119 IRQ_TYPE_NONE>; /* IRQ_SYS_2N cascaded to gic */ 645 interrupts = <GIC_SPI 119 IRQ_TYPE_NONE>; /* IRQ_SYS_2N cascaded to gic */
603 ti,audpwron-gpio = <&gpio5 13 GPIO_ACTIVE_HIGH>; /* gpio line 141 */ 646
647 /* audpwron gpio defined in the board specific dts */
604 648
605 vio-supply = <&smps7_reg>; 649 vio-supply = <&smps7_reg>;
606 v2v1-supply = <&smps9_reg>; 650 v2v1-supply = <&smps9_reg>;
diff --git a/arch/arm/boot/dts/omap5-igep0050.dts b/arch/arm/boot/dts/omap5-igep0050.dts
index 46ecb1dd3b5c..f75ce02fb398 100644
--- a/arch/arm/boot/dts/omap5-igep0050.dts
+++ b/arch/arm/boot/dts/omap5-igep0050.dts
@@ -35,6 +35,22 @@
35 }; 35 };
36}; 36};
37 37
38/* LDO4 is VPP1 - ball AD9 */
39&ldo4_reg {
40 regulator-min-microvolt = <2000000>;
41 regulator-max-microvolt = <2000000>;
42};
43
44/*
45 * LDO7 is used for HDMI: VDDA_DSIPORTA - ball AA33, VDDA_DSIPORTC - ball AE33,
46 * VDDA_HDMI - ball AN25
47 */
48&ldo7_reg {
49 status = "okay";
50 regulator-min-microvolt = <1800000>;
51 regulator-max-microvolt = <1800000>;
52};
53
38&omap5_pmx_core { 54&omap5_pmx_core {
39 i2c4_pins: pinmux_i2c4_pins { 55 i2c4_pins: pinmux_i2c4_pins {
40 pinctrl-single,pins = < 56 pinctrl-single,pins = <
@@ -52,3 +68,13 @@
52 <&gpio7 3 0>; /* 195, SDA */ 68 <&gpio7 3 0>; /* 195, SDA */
53}; 69};
54 70
71&twl6040 {
72 ti,audpwron-gpio = <&gpio5 16 GPIO_ACTIVE_HIGH>; /* gpio line 144 */
73};
74
75&twl6040_pins {
76 pinctrl-single,pins = <
77 OMAP5_IOPAD(0x1c4, PIN_OUTPUT | MUX_MODE6) /* mcspi1_somi.gpio5_144 */
78 OMAP5_IOPAD(0x1ca, PIN_OUTPUT | MUX_MODE6) /* perslimbus2_clock.gpio5_145 */
79 >;
80};
diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts
index 60b3fbb3bf07..a51e60518eb6 100644
--- a/arch/arm/boot/dts/omap5-uevm.dts
+++ b/arch/arm/boot/dts/omap5-uevm.dts
@@ -51,3 +51,13 @@
51 <&gpio9 1 GPIO_ACTIVE_HIGH>, /* TCA6424A P00, LS OE */ 51 <&gpio9 1 GPIO_ACTIVE_HIGH>, /* TCA6424A P00, LS OE */
52 <&gpio7 1 GPIO_ACTIVE_HIGH>; /* GPIO 193, HPD */ 52 <&gpio7 1 GPIO_ACTIVE_HIGH>; /* GPIO 193, HPD */
53}; 53};
54
55&twl6040 {
56 ti,audpwron-gpio = <&gpio5 13 GPIO_ACTIVE_HIGH>; /* gpio line 141 */
57};
58
59&twl6040_pins {
60 pinctrl-single,pins = <
61 OMAP5_IOPAD(0x1be, PIN_OUTPUT | MUX_MODE6) /* mcspi1_somi.gpio5_141 */
62 >;
63};
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts b/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts
index a3601e4c0a2e..b844473601d2 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts
@@ -136,6 +136,7 @@
136&gmac1 { 136&gmac1 {
137 status = "okay"; 137 status = "okay";
138 phy-mode = "rgmii"; 138 phy-mode = "rgmii";
139 phy-handle = <&phy1>;
139 140
140 snps,reset-gpio = <&porta 0 GPIO_ACTIVE_LOW>; 141 snps,reset-gpio = <&porta 0 GPIO_ACTIVE_LOW>;
141 snps,reset-active-low; 142 snps,reset-active-low;
diff --git a/arch/arm/boot/dts/stih407-family.dtsi b/arch/arm/boot/dts/stih407-family.dtsi
index ad8ba10764a3..d294e82447a2 100644
--- a/arch/arm/boot/dts/stih407-family.dtsi
+++ b/arch/arm/boot/dts/stih407-family.dtsi
@@ -24,18 +24,21 @@
24 compatible = "shared-dma-pool"; 24 compatible = "shared-dma-pool";
25 reg = <0x40000000 0x01000000>; 25 reg = <0x40000000 0x01000000>;
26 no-map; 26 no-map;
27 status = "disabled";
27 }; 28 };
28 29
29 gp1_reserved: rproc@41000000 { 30 gp1_reserved: rproc@41000000 {
30 compatible = "shared-dma-pool"; 31 compatible = "shared-dma-pool";
31 reg = <0x41000000 0x01000000>; 32 reg = <0x41000000 0x01000000>;
32 no-map; 33 no-map;
34 status = "disabled";
33 }; 35 };
34 36
35 audio_reserved: rproc@42000000 { 37 audio_reserved: rproc@42000000 {
36 compatible = "shared-dma-pool"; 38 compatible = "shared-dma-pool";
37 reg = <0x42000000 0x01000000>; 39 reg = <0x42000000 0x01000000>;
38 no-map; 40 no-map;
41 status = "disabled";
39 }; 42 };
40 43
41 dmu_reserved: rproc@43000000 { 44 dmu_reserved: rproc@43000000 {
diff --git a/arch/arm/boot/dts/sun6i-a31s-primo81.dts b/arch/arm/boot/dts/sun6i-a31s-primo81.dts
index 68b479b8772c..73c133f5e79c 100644
--- a/arch/arm/boot/dts/sun6i-a31s-primo81.dts
+++ b/arch/arm/boot/dts/sun6i-a31s-primo81.dts
@@ -176,8 +176,6 @@
176}; 176};
177 177
178&reg_dc1sw { 178&reg_dc1sw {
179 regulator-min-microvolt = <3000000>;
180 regulator-max-microvolt = <3000000>;
181 regulator-name = "vcc-lcd"; 179 regulator-name = "vcc-lcd";
182}; 180};
183 181
diff --git a/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts b/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts
index 360adfb1e9ca..d6ad6196a768 100644
--- a/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts
+++ b/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts
@@ -135,8 +135,6 @@
135 135
136&reg_dc1sw { 136&reg_dc1sw {
137 regulator-name = "vcc-lcd-usb2"; 137 regulator-name = "vcc-lcd-usb2";
138 regulator-min-microvolt = <3000000>;
139 regulator-max-microvolt = <3000000>;
140}; 138};
141 139
142&reg_dc5ldo { 140&reg_dc5ldo {
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index 10f49ab5328e..47195e8690b4 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -82,6 +82,7 @@ CONFIG_TOUCHSCREEN_MMS114=y
82CONFIG_INPUT_MISC=y 82CONFIG_INPUT_MISC=y
83CONFIG_INPUT_MAX77693_HAPTIC=y 83CONFIG_INPUT_MAX77693_HAPTIC=y
84CONFIG_INPUT_MAX8997_HAPTIC=y 84CONFIG_INPUT_MAX8997_HAPTIC=y
85CONFIG_KEYBOARD_SAMSUNG=y
85CONFIG_SERIAL_8250=y 86CONFIG_SERIAL_8250=y
86CONFIG_SERIAL_SAMSUNG=y 87CONFIG_SERIAL_SAMSUNG=y
87CONFIG_SERIAL_SAMSUNG_CONSOLE=y 88CONFIG_SERIAL_SAMSUNG_CONSOLE=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 8f857564657f..8a5fff1b7f6f 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -264,6 +264,7 @@ CONFIG_KEYBOARD_TEGRA=y
264CONFIG_KEYBOARD_SPEAR=y 264CONFIG_KEYBOARD_SPEAR=y
265CONFIG_KEYBOARD_ST_KEYSCAN=y 265CONFIG_KEYBOARD_ST_KEYSCAN=y
266CONFIG_KEYBOARD_CROS_EC=m 266CONFIG_KEYBOARD_CROS_EC=m
267CONFIG_KEYBOARD_SAMSUNG=m
267CONFIG_MOUSE_PS2_ELANTECH=y 268CONFIG_MOUSE_PS2_ELANTECH=y
268CONFIG_MOUSE_CYAPA=m 269CONFIG_MOUSE_CYAPA=m
269CONFIG_MOUSE_ELAN_I2C=y 270CONFIG_MOUSE_ELAN_I2C=y
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index 19cfab526d13..20febb368844 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -29,7 +29,7 @@
29 29
30static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) 30static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
31{ 31{
32 return (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_REPEAT); 32 return (pmd_t *)get_zeroed_page(GFP_KERNEL);
33} 33}
34 34
35static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) 35static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index aeddd28b3595..92fd2c8a9af0 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -193,6 +193,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
193 193
194#define pmd_large(pmd) (pmd_val(pmd) & 2) 194#define pmd_large(pmd) (pmd_val(pmd) & 2)
195#define pmd_bad(pmd) (pmd_val(pmd) & 2) 195#define pmd_bad(pmd) (pmd_val(pmd) & 2)
196#define pmd_present(pmd) (pmd_val(pmd))
196 197
197#define copy_pmd(pmdpd,pmdps) \ 198#define copy_pmd(pmdpd,pmdps) \
198 do { \ 199 do { \
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index fa70db7c714b..2a029bceaf2f 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -211,6 +211,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
211 : !!(pmd_val(pmd) & (val))) 211 : !!(pmd_val(pmd) & (val)))
212#define pmd_isclear(pmd, val) (!(pmd_val(pmd) & (val))) 212#define pmd_isclear(pmd, val) (!(pmd_val(pmd) & (val)))
213 213
214#define pmd_present(pmd) (pmd_isset((pmd), L_PMD_SECT_VALID))
214#define pmd_young(pmd) (pmd_isset((pmd), PMD_SECT_AF)) 215#define pmd_young(pmd) (pmd_isset((pmd), PMD_SECT_AF))
215#define pte_special(pte) (pte_isset((pte), L_PTE_SPECIAL)) 216#define pte_special(pte) (pte_isset((pte), L_PTE_SPECIAL))
216static inline pte_t pte_mkspecial(pte_t pte) 217static inline pte_t pte_mkspecial(pte_t pte)
@@ -249,10 +250,10 @@ PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
249#define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))) 250#define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
250#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot) 251#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
251 252
252/* represent a notpresent pmd by zero, this is used by pmdp_invalidate */ 253/* represent a notpresent pmd by faulting entry, this is used by pmdp_invalidate */
253static inline pmd_t pmd_mknotpresent(pmd_t pmd) 254static inline pmd_t pmd_mknotpresent(pmd_t pmd)
254{ 255{
255 return __pmd(0); 256 return __pmd(pmd_val(pmd) & ~L_PMD_SECT_VALID);
256} 257}
257 258
258static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) 259static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
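The point of returning a faulting-but-nonzero entry: pmdp_invalidate() needs pmd_none() to stay false (the pmd still maps a huge page as far as the page tables are concerned) while pmd_present() becomes false. Illustrative assertions, not part of the patch:

	pmd_t inv = pmd_mknotpresent(pmd);

	BUG_ON(pmd_none(inv));		/* pfn/prot bits are retained */
	BUG_ON(pmd_present(inv));	/* L_PMD_SECT_VALID is cleared */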
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 348caabb7625..d62204060cbe 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -182,7 +182,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
182#define pgd_offset_k(addr) pgd_offset(&init_mm, addr) 182#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
183 183
184#define pmd_none(pmd) (!pmd_val(pmd)) 184#define pmd_none(pmd) (!pmd_val(pmd))
185#define pmd_present(pmd) (pmd_val(pmd))
186 185
187static inline pte_t *pmd_page_vaddr(pmd_t pmd) 186static inline pte_t *pmd_page_vaddr(pmd_t pmd)
188{ 187{
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index ef9119f7462e..4d9375814b53 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -733,8 +733,8 @@ static int vfp_set(struct task_struct *target,
733 if (ret) 733 if (ret)
734 return ret; 734 return ret;
735 735
736 vfp_flush_hwstate(thread);
737 thread->vfpstate.hard = new_vfp; 736 thread->vfpstate.hard = new_vfp;
737 vfp_flush_hwstate(thread);
738 738
739 return 0; 739 return 0;
740} 740}
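Why the two statements trade places (hedged, going by the upstream rationale for this change): vfp_flush_hwstate() invalidates the thread's claim on the hardware state, and the whole-struct copy would clobber that invalidation if it ran second. Annotated sketch:

	thread->vfpstate.hard = new_vfp;  /* struct copy; also writes .cpu */
	vfp_flush_hwstate(thread);        /* on SMP, re-marks .cpu invalid so
					   * the new registers get reloaded on
					   * the return to userspace */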
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index df90bc59bfce..861521606c6d 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -486,7 +486,7 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
486 486
487static void smp_cross_call(const struct cpumask *target, unsigned int ipinr) 487static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
488{ 488{
489 trace_ipi_raise(target, ipi_types[ipinr]); 489 trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
490 __smp_cross_call(target, ipinr); 490 __smp_cross_call(target, ipinr);
491} 491}
492 492
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index e65aa7d11b20..20dcf6e904b2 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -61,7 +61,6 @@ config ARCH_EXYNOS4
61 select CLKSRC_SAMSUNG_PWM if CPU_EXYNOS4210 61 select CLKSRC_SAMSUNG_PWM if CPU_EXYNOS4210
62 select CPU_EXYNOS4210 62 select CPU_EXYNOS4210
63 select GIC_NON_BANKED 63 select GIC_NON_BANKED
64 select KEYBOARD_SAMSUNG if INPUT_KEYBOARD
65 select MIGHT_HAVE_CACHE_L2X0 64 select MIGHT_HAVE_CACHE_L2X0
66 help 65 help
67 Samsung EXYNOS4 (Cortex-A9) SoC based systems 66 Samsung EXYNOS4 (Cortex-A9) SoC based systems
diff --git a/arch/arm/mach-imx/mach-imx6ul.c b/arch/arm/mach-imx/mach-imx6ul.c
index a38b16b69923..b56de4b8cdf2 100644
--- a/arch/arm/mach-imx/mach-imx6ul.c
+++ b/arch/arm/mach-imx/mach-imx6ul.c
@@ -46,7 +46,7 @@ static int ksz8081_phy_fixup(struct phy_device *dev)
46static void __init imx6ul_enet_phy_init(void) 46static void __init imx6ul_enet_phy_init(void)
47{ 47{
48 if (IS_BUILTIN(CONFIG_PHYLIB)) 48 if (IS_BUILTIN(CONFIG_PHYLIB))
49 phy_register_fixup_for_uid(PHY_ID_KSZ8081, 0xffffffff, 49 phy_register_fixup_for_uid(PHY_ID_KSZ8081, MICREL_PHY_ID_MASK,
50 ksz8081_phy_fixup); 50 ksz8081_phy_fixup);
51} 51}
52 52
diff --git a/arch/arm/mach-omap1/ams-delta-fiq-handler.S b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
index 5d7fb596bf4a..bf608441b357 100644
--- a/arch/arm/mach-omap1/ams-delta-fiq-handler.S
+++ b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
@@ -43,8 +43,8 @@
43#define OTHERS_MASK (MODEM_IRQ_MASK | HOOK_SWITCH_MASK) 43#define OTHERS_MASK (MODEM_IRQ_MASK | HOOK_SWITCH_MASK)
44 44
45/* IRQ handler register bitmasks */ 45/* IRQ handler register bitmasks */
46#define DEFERRED_FIQ_MASK (0x1 << (INT_DEFERRED_FIQ % IH2_BASE)) 46#define DEFERRED_FIQ_MASK OMAP_IRQ_BIT(INT_DEFERRED_FIQ)
47#define GPIO_BANK1_MASK (0x1 << INT_GPIO_BANK1) 47#define GPIO_BANK1_MASK OMAP_IRQ_BIT(INT_GPIO_BANK1)
48 48
49/* Driver buffer byte offsets */ 49/* Driver buffer byte offsets */
50#define BUF_MASK (FIQ_MASK * 4) 50#define BUF_MASK (FIQ_MASK * 4)
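Background for this hunk: with SPARSE_IRQ the omap1 INT_* numbers gained a NR_IRQS_LEGACY offset, so raw "1 << INT_x" shifts stopped lining up with the hardware mask registers; OMAP_IRQ_BIT folds the offset back out. A plausible definition (an assumption here, the real macro lives in <mach/irqs.h>):

	#define OMAP_IRQ_BIT(irq)	(1 << (((irq) - NR_IRQS_LEGACY) % 32))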
@@ -110,7 +110,7 @@ ENTRY(qwerty_fiqin_start)
110 mov r8, #2 @ reset FIQ agreement 110 mov r8, #2 @ reset FIQ agreement
111 str r8, [r12, #IRQ_CONTROL_REG_OFFSET] 111 str r8, [r12, #IRQ_CONTROL_REG_OFFSET]
112 112
113 cmp r10, #INT_GPIO_BANK1 @ is it GPIO bank interrupt? 113 cmp r10, #(INT_GPIO_BANK1 - NR_IRQS_LEGACY) @ is it GPIO interrupt?
114 beq gpio @ yes - process it 114 beq gpio @ yes - process it
115 115
116 mov r8, #1 116 mov r8, #1
diff --git a/arch/arm/mach-omap1/ams-delta-fiq.c b/arch/arm/mach-omap1/ams-delta-fiq.c
index d1f12095f315..ec760ae2f917 100644
--- a/arch/arm/mach-omap1/ams-delta-fiq.c
+++ b/arch/arm/mach-omap1/ams-delta-fiq.c
@@ -109,7 +109,8 @@ void __init ams_delta_init_fiq(void)
109 * Since no set_type() method is provided by OMAP irq chip, 109 * Since no set_type() method is provided by OMAP irq chip,
110 * switch to edge triggered interrupt type manually. 110 * switch to edge triggered interrupt type manually.
111 */ 111 */
112 offset = IRQ_ILR0_REG_OFFSET + INT_DEFERRED_FIQ * 0x4; 112 offset = IRQ_ILR0_REG_OFFSET +
113 ((INT_DEFERRED_FIQ - NR_IRQS_LEGACY) & 0x1f) * 0x4;
113 val = omap_readl(DEFERRED_FIQ_IH_BASE + offset) & ~(1 << 1); 114 val = omap_readl(DEFERRED_FIQ_IH_BASE + offset) & ~(1 << 1);
114 omap_writel(val, DEFERRED_FIQ_IH_BASE + offset); 115 omap_writel(val, DEFERRED_FIQ_IH_BASE + offset);
115 116
@@ -149,7 +150,7 @@ void __init ams_delta_init_fiq(void)
149 /* 150 /*
150 * Redirect GPIO interrupts to FIQ 151 * Redirect GPIO interrupts to FIQ
151 */ 152 */
152 offset = IRQ_ILR0_REG_OFFSET + INT_GPIO_BANK1 * 0x4; 153 offset = IRQ_ILR0_REG_OFFSET + (INT_GPIO_BANK1 - NR_IRQS_LEGACY) * 0x4;
153 val = omap_readl(OMAP_IH1_BASE + offset) | 1; 154 val = omap_readl(OMAP_IH1_BASE + offset) | 1;
154 omap_writel(val, OMAP_IH1_BASE + offset); 155 omap_writel(val, OMAP_IH1_BASE + offset);
155} 156}
diff --git a/arch/arm/mach-omap1/include/mach/ams-delta-fiq.h b/arch/arm/mach-omap1/include/mach/ams-delta-fiq.h
index adb5e7649659..6dfc3e1210a3 100644
--- a/arch/arm/mach-omap1/include/mach/ams-delta-fiq.h
+++ b/arch/arm/mach-omap1/include/mach/ams-delta-fiq.h
@@ -14,6 +14,8 @@
14#ifndef __AMS_DELTA_FIQ_H 14#ifndef __AMS_DELTA_FIQ_H
15#define __AMS_DELTA_FIQ_H 15#define __AMS_DELTA_FIQ_H
16 16
17#include <mach/irqs.h>
18
17/* 19/*
18 * Interrupt number used for passing control from FIQ to IRQ. 20 * Interrupt number used for passing control from FIQ to IRQ.
19 * IRQ12, described as reserved, has been selected. 21 * IRQ12, described as reserved, has been selected.
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 0517f0c1581a..1a648e9dfaa0 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -17,6 +17,7 @@ config ARCH_OMAP3
17 select PM_OPP if PM 17 select PM_OPP if PM
18 select PM if CPU_IDLE 18 select PM if CPU_IDLE
19 select SOC_HAS_OMAP2_SDRC 19 select SOC_HAS_OMAP2_SDRC
20 select ARM_ERRATA_430973
20 21
21config ARCH_OMAP4 22config ARCH_OMAP4
22 bool "TI OMAP4" 23 bool "TI OMAP4"
@@ -36,6 +37,7 @@ config ARCH_OMAP4
36 select PM if CPU_IDLE 37 select PM if CPU_IDLE
37 select ARM_ERRATA_754322 38 select ARM_ERRATA_754322
38 select ARM_ERRATA_775420 39 select ARM_ERRATA_775420
40 select OMAP_INTERCONNECT
39 41
40config SOC_OMAP5 42config SOC_OMAP5
41 bool "TI OMAP5" 43 bool "TI OMAP5"
@@ -67,6 +69,8 @@ config SOC_AM43XX
67 select HAVE_ARM_SCU 69 select HAVE_ARM_SCU
68 select GENERIC_CLOCKEVENTS_BROADCAST 70 select GENERIC_CLOCKEVENTS_BROADCAST
69 select HAVE_ARM_TWD 71 select HAVE_ARM_TWD
72 select ARM_ERRATA_754322
73 select ARM_ERRATA_775420
70 74
71config SOC_DRA7XX 75config SOC_DRA7XX
72 bool "TI DRA7XX" 76 bool "TI DRA7XX"
@@ -240,4 +244,12 @@ endmenu
240 244
241endif 245endif
242 246
247config OMAP5_ERRATA_801819
248 bool "Errata 801819: An eviction from L1 data cache might stall indefinitely"
249 depends on SOC_OMAP5 || SOC_DRA7XX
250 help
251 A livelock can occur in the L2 cache arbitration that might prevent
252 a snoop from completing. Under certain conditions this can cause the
253 system to deadlock.
254
243endmenu 255endmenu
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
index d9c3ffc39329..390795b334c3 100644
--- a/arch/arm/mach-omap2/board-ldp.c
+++ b/arch/arm/mach-omap2/board-ldp.c
@@ -39,7 +39,7 @@
39#include "gpmc.h" 39#include "gpmc.h"
40#include "gpmc-smsc911x.h" 40#include "gpmc-smsc911x.h"
41 41
42#include <video/omapdss.h> 42#include <linux/platform_data/omapdss.h>
43#include <video/omap-panel-data.h> 43#include <video/omap-panel-data.h>
44 44
45#include "board-flash.h" 45#include "board-flash.h"
@@ -47,6 +47,7 @@
47#include "hsmmc.h" 47#include "hsmmc.h"
48#include "control.h" 48#include "control.h"
49#include "common-board-devices.h" 49#include "common-board-devices.h"
50#include "display.h"
50 51
51#define LDP_SMSC911X_CS 1 52#define LDP_SMSC911X_CS 1
52#define LDP_SMSC911X_GPIO 152 53#define LDP_SMSC911X_GPIO 152
diff --git a/arch/arm/mach-omap2/board-rx51-video.c b/arch/arm/mach-omap2/board-rx51-video.c
index 9cfebc5c7455..180c6aa633bd 100644
--- a/arch/arm/mach-omap2/board-rx51-video.c
+++ b/arch/arm/mach-omap2/board-rx51-video.c
@@ -15,13 +15,14 @@
15#include <linux/spi/spi.h> 15#include <linux/spi/spi.h>
16#include <linux/mm.h> 16#include <linux/mm.h>
17#include <asm/mach-types.h> 17#include <asm/mach-types.h>
18#include <video/omapdss.h> 18#include <linux/platform_data/omapdss.h>
19#include <video/omap-panel-data.h> 19#include <video/omap-panel-data.h>
20 20
21#include <linux/platform_data/spi-omap2-mcspi.h> 21#include <linux/platform_data/spi-omap2-mcspi.h>
22 22
23#include "soc.h" 23#include "soc.h"
24#include "board-rx51.h" 24#include "board-rx51.h"
25#include "display.h"
25 26
26#include "mux.h" 27#include "mux.h"
27 28
@@ -32,7 +33,6 @@
32static struct connector_atv_platform_data rx51_tv_pdata = { 33static struct connector_atv_platform_data rx51_tv_pdata = {
33 .name = "tv", 34 .name = "tv",
34 .source = "venc.0", 35 .source = "venc.0",
35 .connector_type = OMAP_DSS_VENC_TYPE_COMPOSITE,
36 .invert_polarity = false, 36 .invert_polarity = false,
37}; 37};
38 38
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index 6ab13d18c636..70b3eaf085e4 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -29,7 +29,7 @@
29#include <linux/mfd/syscon.h> 29#include <linux/mfd/syscon.h>
30#include <linux/regmap.h> 30#include <linux/regmap.h>
31 31
32#include <video/omapdss.h> 32#include <linux/platform_data/omapdss.h>
33#include "omap_hwmod.h" 33#include "omap_hwmod.h"
34#include "omap_device.h" 34#include "omap_device.h"
35#include "omap-pm.h" 35#include "omap-pm.h"
diff --git a/arch/arm/mach-omap2/display.h b/arch/arm/mach-omap2/display.h
index 7375854b16c7..78f253005279 100644
--- a/arch/arm/mach-omap2/display.h
+++ b/arch/arm/mach-omap2/display.h
@@ -33,4 +33,9 @@ int omap_init_vout(void);
33 33
34struct device_node * __init omapdss_find_dss_of_node(void); 34struct device_node * __init omapdss_find_dss_of_node(void);
35 35
36struct omap_dss_board_info;
37
38/* Init with the board info */
39int omap_display_init(struct omap_dss_board_info *board_data);
40
36#endif 41#endif
diff --git a/arch/arm/mach-omap2/dss-common.c b/arch/arm/mach-omap2/dss-common.c
index ea2be0f5953b..1d583bc0b1a9 100644
--- a/arch/arm/mach-omap2/dss-common.c
+++ b/arch/arm/mach-omap2/dss-common.c
@@ -27,7 +27,7 @@
27#include <linux/gpio.h> 27#include <linux/gpio.h>
28#include <linux/platform_device.h> 28#include <linux/platform_device.h>
29 29
30#include <video/omapdss.h> 30#include <linux/platform_data/omapdss.h>
31#include <video/omap-panel-data.h> 31#include <video/omap-panel-data.h>
32 32
33#include "soc.h" 33#include "soc.h"
diff --git a/arch/arm/mach-omap2/omap-secure.h b/arch/arm/mach-omap2/omap-secure.h
index af2851fbcdf0..bae263fba640 100644
--- a/arch/arm/mach-omap2/omap-secure.h
+++ b/arch/arm/mach-omap2/omap-secure.h
@@ -46,6 +46,7 @@
46 46
47#define OMAP5_DRA7_MON_SET_CNTFRQ_INDEX 0x109 47#define OMAP5_DRA7_MON_SET_CNTFRQ_INDEX 0x109
48#define OMAP5_MON_AMBA_IF_INDEX 0x108 48#define OMAP5_MON_AMBA_IF_INDEX 0x108
49#define OMAP5_DRA7_MON_SET_ACR_INDEX 0x107
49 50
50/* Secure PPA(Primary Protected Application) APIs */ 51/* Secure PPA(Primary Protected Application) APIs */
51#define OMAP4_PPA_L2_POR_INDEX 0x23 52#define OMAP4_PPA_L2_POR_INDEX 0x23
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index c625cc10d9f9..8cd1de914ee4 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -50,6 +50,39 @@ void __iomem *omap4_get_scu_base(void)
50 return scu_base; 50 return scu_base;
51} 51}
52 52
53#ifdef CONFIG_OMAP5_ERRATA_801819
54void omap5_erratum_workaround_801819(void)
55{
56 u32 acr, revidr;
57 u32 acr_mask;
58
59 /* REVIDR[3] indicates erratum fix available on silicon */
60 asm volatile ("mrc p15, 0, %0, c0, c0, 6" : "=r" (revidr));
61 if (revidr & (0x1 << 3))
62 return;
63
64 asm volatile ("mrc p15, 0, %0, c1, c0, 1" : "=r" (acr));
65 /*
66 * BIT(27) - Disables streaming. All write-allocate lines allocate in
67 * the L1 or L2 cache.
68 * BIT(25) - Disables streaming. All write-allocate lines allocate in
69 * the L1 cache.
70 */
71 acr_mask = (0x3 << 25) | (0x3 << 27);
 72 /* already applied? if so, skip the expensive smc */
73 if ((acr & acr_mask) == acr_mask)
74 return;
75
76 acr |= acr_mask;
77 omap_smc1(OMAP5_DRA7_MON_SET_ACR_INDEX, acr);
78
79 pr_debug("%s: ARM erratum workaround 801819 applied on CPU%d\n",
80 __func__, smp_processor_id());
81}
82#else
83static inline void omap5_erratum_workaround_801819(void) { }
84#endif
85
53static void omap4_secondary_init(unsigned int cpu) 86static void omap4_secondary_init(unsigned int cpu)
54{ 87{
55 /* 88 /*
@@ -64,12 +97,15 @@ static void omap4_secondary_init(unsigned int cpu)
64 omap_secure_dispatcher(OMAP4_PPA_CPU_ACTRL_SMP_INDEX, 97 omap_secure_dispatcher(OMAP4_PPA_CPU_ACTRL_SMP_INDEX,
65 4, 0, 0, 0, 0, 0); 98 4, 0, 0, 0, 0, 0);
66 99
67 /* 100 if (soc_is_omap54xx() || soc_is_dra7xx()) {
68 * Configure the CNTFRQ register for the secondary cpu's which 101 /*
69 * indicates the frequency of the cpu local timers. 102 * Configure the CNTFRQ register for the secondary cpu's which
70 */ 103 * indicates the frequency of the cpu local timers.
71 if (soc_is_omap54xx() || soc_is_dra7xx()) 104 */
72 set_cntfreq(); 105 set_cntfreq();
106 /* Configure ACR to disable streaming WA for 801819 */
107 omap5_erratum_workaround_801819();
108 }
73 109
74 /* 110 /*
75 * Synchronise with the boot thread. 111 * Synchronise with the boot thread.
@@ -218,6 +254,8 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
218 254
219 if (cpu_is_omap446x()) 255 if (cpu_is_omap446x())
220 startup_addr = omap4460_secondary_startup; 256 startup_addr = omap4460_secondary_startup;
257 if (soc_is_dra74x() || soc_is_omap54xx())
258 omap5_erratum_workaround_801819();
221 259
222 /* 260 /*
223 * Write the address of secondary startup routine into the 261 * Write the address of secondary startup routine into the
diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c
index 78af6d8cf2e2..daf2753de7aa 100644
--- a/arch/arm/mach-omap2/powerdomain.c
+++ b/arch/arm/mach-omap2/powerdomain.c
@@ -186,8 +186,9 @@ static int _pwrdm_state_switch(struct powerdomain *pwrdm, int flag)
186 trace_state = (PWRDM_TRACE_STATES_FLAG | 186 trace_state = (PWRDM_TRACE_STATES_FLAG |
187 ((next & OMAP_POWERSTATE_MASK) << 8) | 187 ((next & OMAP_POWERSTATE_MASK) << 8) |
188 ((prev & OMAP_POWERSTATE_MASK) << 0)); 188 ((prev & OMAP_POWERSTATE_MASK) << 0));
189 trace_power_domain_target(pwrdm->name, trace_state, 189 trace_power_domain_target_rcuidle(pwrdm->name,
190 smp_processor_id()); 190 trace_state,
191 smp_processor_id());
191 } 192 }
192 break; 193 break;
193 default: 194 default:
@@ -523,8 +524,8 @@ int pwrdm_set_next_pwrst(struct powerdomain *pwrdm, u8 pwrst)
523 524
524 if (arch_pwrdm && arch_pwrdm->pwrdm_set_next_pwrst) { 525 if (arch_pwrdm && arch_pwrdm->pwrdm_set_next_pwrst) {
525 /* Trace the pwrdm desired target state */ 526 /* Trace the pwrdm desired target state */
526 trace_power_domain_target(pwrdm->name, pwrst, 527 trace_power_domain_target_rcuidle(pwrdm->name, pwrst,
527 smp_processor_id()); 528 smp_processor_id());
528 /* Program the pwrdm desired target state */ 529 /* Program the pwrdm desired target state */
529 ret = arch_pwrdm->pwrdm_set_next_pwrst(pwrdm, pwrst); 530 ret = arch_pwrdm->pwrdm_set_next_pwrst(pwrdm, pwrst);
530 } 531 }
diff --git a/arch/arm/mach-omap2/powerdomains7xx_data.c b/arch/arm/mach-omap2/powerdomains7xx_data.c
index 0ec2d00f4237..eb350a673133 100644
--- a/arch/arm/mach-omap2/powerdomains7xx_data.c
+++ b/arch/arm/mach-omap2/powerdomains7xx_data.c
@@ -36,14 +36,7 @@ static struct powerdomain iva_7xx_pwrdm = {
36 .prcm_offs = DRA7XX_PRM_IVA_INST, 36 .prcm_offs = DRA7XX_PRM_IVA_INST,
37 .prcm_partition = DRA7XX_PRM_PARTITION, 37 .prcm_partition = DRA7XX_PRM_PARTITION,
38 .pwrsts = PWRSTS_OFF_ON, 38 .pwrsts = PWRSTS_OFF_ON,
39 .pwrsts_logic_ret = PWRSTS_OFF,
40 .banks = 4, 39 .banks = 4,
41 .pwrsts_mem_ret = {
42 [0] = PWRSTS_OFF_RET, /* hwa_mem */
43 [1] = PWRSTS_OFF_RET, /* sl2_mem */
44 [2] = PWRSTS_OFF_RET, /* tcm1_mem */
45 [3] = PWRSTS_OFF_RET, /* tcm2_mem */
46 },
47 .pwrsts_mem_on = { 40 .pwrsts_mem_on = {
48 [0] = PWRSTS_ON, /* hwa_mem */ 41 [0] = PWRSTS_ON, /* hwa_mem */
49 [1] = PWRSTS_ON, /* sl2_mem */ 42 [1] = PWRSTS_ON, /* sl2_mem */
@@ -76,12 +69,7 @@ static struct powerdomain ipu_7xx_pwrdm = {
76 .prcm_offs = DRA7XX_PRM_IPU_INST, 69 .prcm_offs = DRA7XX_PRM_IPU_INST,
77 .prcm_partition = DRA7XX_PRM_PARTITION, 70 .prcm_partition = DRA7XX_PRM_PARTITION,
78 .pwrsts = PWRSTS_OFF_ON, 71 .pwrsts = PWRSTS_OFF_ON,
79 .pwrsts_logic_ret = PWRSTS_OFF,
80 .banks = 2, 72 .banks = 2,
81 .pwrsts_mem_ret = {
82 [0] = PWRSTS_OFF_RET, /* aessmem */
83 [1] = PWRSTS_OFF_RET, /* periphmem */
84 },
85 .pwrsts_mem_on = { 73 .pwrsts_mem_on = {
86 [0] = PWRSTS_ON, /* aessmem */ 74 [0] = PWRSTS_ON, /* aessmem */
87 [1] = PWRSTS_ON, /* periphmem */ 75 [1] = PWRSTS_ON, /* periphmem */
@@ -95,11 +83,7 @@ static struct powerdomain dss_7xx_pwrdm = {
95 .prcm_offs = DRA7XX_PRM_DSS_INST, 83 .prcm_offs = DRA7XX_PRM_DSS_INST,
96 .prcm_partition = DRA7XX_PRM_PARTITION, 84 .prcm_partition = DRA7XX_PRM_PARTITION,
97 .pwrsts = PWRSTS_OFF_ON, 85 .pwrsts = PWRSTS_OFF_ON,
98 .pwrsts_logic_ret = PWRSTS_OFF,
99 .banks = 1, 86 .banks = 1,
100 .pwrsts_mem_ret = {
101 [0] = PWRSTS_OFF_RET, /* dss_mem */
102 },
103 .pwrsts_mem_on = { 87 .pwrsts_mem_on = {
104 [0] = PWRSTS_ON, /* dss_mem */ 88 [0] = PWRSTS_ON, /* dss_mem */
105 }, 89 },
@@ -111,13 +95,8 @@ static struct powerdomain l4per_7xx_pwrdm = {
111 .name = "l4per_pwrdm", 95 .name = "l4per_pwrdm",
112 .prcm_offs = DRA7XX_PRM_L4PER_INST, 96 .prcm_offs = DRA7XX_PRM_L4PER_INST,
113 .prcm_partition = DRA7XX_PRM_PARTITION, 97 .prcm_partition = DRA7XX_PRM_PARTITION,
114 .pwrsts = PWRSTS_RET_ON, 98 .pwrsts = PWRSTS_ON,
115 .pwrsts_logic_ret = PWRSTS_RET,
116 .banks = 2, 99 .banks = 2,
117 .pwrsts_mem_ret = {
118 [0] = PWRSTS_OFF_RET, /* nonretained_bank */
119 [1] = PWRSTS_OFF_RET, /* retained_bank */
120 },
121 .pwrsts_mem_on = { 100 .pwrsts_mem_on = {
122 [0] = PWRSTS_ON, /* nonretained_bank */ 101 [0] = PWRSTS_ON, /* nonretained_bank */
123 [1] = PWRSTS_ON, /* retained_bank */ 102 [1] = PWRSTS_ON, /* retained_bank */
@@ -132,9 +111,6 @@ static struct powerdomain gpu_7xx_pwrdm = {
132 .prcm_partition = DRA7XX_PRM_PARTITION, 111 .prcm_partition = DRA7XX_PRM_PARTITION,
133 .pwrsts = PWRSTS_OFF_ON, 112 .pwrsts = PWRSTS_OFF_ON,
134 .banks = 1, 113 .banks = 1,
135 .pwrsts_mem_ret = {
136 [0] = PWRSTS_OFF_RET, /* gpu_mem */
137 },
138 .pwrsts_mem_on = { 114 .pwrsts_mem_on = {
139 [0] = PWRSTS_ON, /* gpu_mem */ 115 [0] = PWRSTS_ON, /* gpu_mem */
140 }, 116 },
@@ -148,8 +124,6 @@ static struct powerdomain wkupaon_7xx_pwrdm = {
148 .prcm_partition = DRA7XX_PRM_PARTITION, 124 .prcm_partition = DRA7XX_PRM_PARTITION,
149 .pwrsts = PWRSTS_ON, 125 .pwrsts = PWRSTS_ON,
150 .banks = 1, 126 .banks = 1,
151 .pwrsts_mem_ret = {
152 },
153 .pwrsts_mem_on = { 127 .pwrsts_mem_on = {
154 [0] = PWRSTS_ON, /* wkup_bank */ 128 [0] = PWRSTS_ON, /* wkup_bank */
155 }, 129 },
@@ -161,15 +135,7 @@ static struct powerdomain core_7xx_pwrdm = {
161 .prcm_offs = DRA7XX_PRM_CORE_INST, 135 .prcm_offs = DRA7XX_PRM_CORE_INST,
162 .prcm_partition = DRA7XX_PRM_PARTITION, 136 .prcm_partition = DRA7XX_PRM_PARTITION,
163 .pwrsts = PWRSTS_ON, 137 .pwrsts = PWRSTS_ON,
164 .pwrsts_logic_ret = PWRSTS_RET,
165 .banks = 5, 138 .banks = 5,
166 .pwrsts_mem_ret = {
167 [0] = PWRSTS_OFF_RET, /* core_nret_bank */
168 [1] = PWRSTS_OFF_RET, /* core_ocmram */
169 [2] = PWRSTS_OFF_RET, /* core_other_bank */
170 [3] = PWRSTS_OFF_RET, /* ipu_l2ram */
171 [4] = PWRSTS_OFF_RET, /* ipu_unicache */
172 },
173 .pwrsts_mem_on = { 139 .pwrsts_mem_on = {
174 [0] = PWRSTS_ON, /* core_nret_bank */ 140 [0] = PWRSTS_ON, /* core_nret_bank */
175 [1] = PWRSTS_ON, /* core_ocmram */ 141 [1] = PWRSTS_ON, /* core_ocmram */
@@ -226,11 +192,7 @@ static struct powerdomain vpe_7xx_pwrdm = {
226 .prcm_offs = DRA7XX_PRM_VPE_INST, 192 .prcm_offs = DRA7XX_PRM_VPE_INST,
227 .prcm_partition = DRA7XX_PRM_PARTITION, 193 .prcm_partition = DRA7XX_PRM_PARTITION,
228 .pwrsts = PWRSTS_OFF_ON, 194 .pwrsts = PWRSTS_OFF_ON,
229 .pwrsts_logic_ret = PWRSTS_OFF,
230 .banks = 1, 195 .banks = 1,
231 .pwrsts_mem_ret = {
232 [0] = PWRSTS_OFF_RET, /* vpe_bank */
233 },
234 .pwrsts_mem_on = { 196 .pwrsts_mem_on = {
235 [0] = PWRSTS_ON, /* vpe_bank */ 197 [0] = PWRSTS_ON, /* vpe_bank */
236 }, 198 },
@@ -260,14 +222,8 @@ static struct powerdomain l3init_7xx_pwrdm = {
260 .name = "l3init_pwrdm", 222 .name = "l3init_pwrdm",
261 .prcm_offs = DRA7XX_PRM_L3INIT_INST, 223 .prcm_offs = DRA7XX_PRM_L3INIT_INST,
262 .prcm_partition = DRA7XX_PRM_PARTITION, 224 .prcm_partition = DRA7XX_PRM_PARTITION,
263 .pwrsts = PWRSTS_RET_ON, 225 .pwrsts = PWRSTS_ON,
264 .pwrsts_logic_ret = PWRSTS_RET,
265 .banks = 3, 226 .banks = 3,
266 .pwrsts_mem_ret = {
267 [0] = PWRSTS_OFF_RET, /* gmac_bank */
268 [1] = PWRSTS_OFF_RET, /* l3init_bank1 */
269 [2] = PWRSTS_OFF_RET, /* l3init_bank2 */
270 },
271 .pwrsts_mem_on = { 227 .pwrsts_mem_on = {
272 [0] = PWRSTS_ON, /* gmac_bank */ 228 [0] = PWRSTS_ON, /* gmac_bank */
273 [1] = PWRSTS_ON, /* l3init_bank1 */ 229 [1] = PWRSTS_ON, /* l3init_bank1 */
@@ -283,9 +239,6 @@ static struct powerdomain eve3_7xx_pwrdm = {
283 .prcm_partition = DRA7XX_PRM_PARTITION, 239 .prcm_partition = DRA7XX_PRM_PARTITION,
284 .pwrsts = PWRSTS_OFF_ON, 240 .pwrsts = PWRSTS_OFF_ON,
285 .banks = 1, 241 .banks = 1,
286 .pwrsts_mem_ret = {
287 [0] = PWRSTS_OFF_RET, /* eve3_bank */
288 },
289 .pwrsts_mem_on = { 242 .pwrsts_mem_on = {
290 [0] = PWRSTS_ON, /* eve3_bank */ 243 [0] = PWRSTS_ON, /* eve3_bank */
291 }, 244 },
@@ -299,9 +252,6 @@ static struct powerdomain emu_7xx_pwrdm = {
299 .prcm_partition = DRA7XX_PRM_PARTITION, 252 .prcm_partition = DRA7XX_PRM_PARTITION,
300 .pwrsts = PWRSTS_OFF_ON, 253 .pwrsts = PWRSTS_OFF_ON,
301 .banks = 1, 254 .banks = 1,
302 .pwrsts_mem_ret = {
303 [0] = PWRSTS_OFF_RET, /* emu_bank */
304 },
305 .pwrsts_mem_on = { 255 .pwrsts_mem_on = {
306 [0] = PWRSTS_ON, /* emu_bank */ 256 [0] = PWRSTS_ON, /* emu_bank */
307 }, 257 },
@@ -314,11 +264,6 @@ static struct powerdomain dsp2_7xx_pwrdm = {
314 .prcm_partition = DRA7XX_PRM_PARTITION, 264 .prcm_partition = DRA7XX_PRM_PARTITION,
315 .pwrsts = PWRSTS_OFF_ON, 265 .pwrsts = PWRSTS_OFF_ON,
316 .banks = 3, 266 .banks = 3,
317 .pwrsts_mem_ret = {
318 [0] = PWRSTS_OFF_RET, /* dsp2_edma */
319 [1] = PWRSTS_OFF_RET, /* dsp2_l1 */
320 [2] = PWRSTS_OFF_RET, /* dsp2_l2 */
321 },
322 .pwrsts_mem_on = { 267 .pwrsts_mem_on = {
323 [0] = PWRSTS_ON, /* dsp2_edma */ 268 [0] = PWRSTS_ON, /* dsp2_edma */
324 [1] = PWRSTS_ON, /* dsp2_l1 */ 269 [1] = PWRSTS_ON, /* dsp2_l1 */
@@ -334,11 +279,6 @@ static struct powerdomain dsp1_7xx_pwrdm = {
334 .prcm_partition = DRA7XX_PRM_PARTITION, 279 .prcm_partition = DRA7XX_PRM_PARTITION,
335 .pwrsts = PWRSTS_OFF_ON, 280 .pwrsts = PWRSTS_OFF_ON,
336 .banks = 3, 281 .banks = 3,
337 .pwrsts_mem_ret = {
338 [0] = PWRSTS_OFF_RET, /* dsp1_edma */
339 [1] = PWRSTS_OFF_RET, /* dsp1_l1 */
340 [2] = PWRSTS_OFF_RET, /* dsp1_l2 */
341 },
342 .pwrsts_mem_on = { 282 .pwrsts_mem_on = {
343 [0] = PWRSTS_ON, /* dsp1_edma */ 283 [0] = PWRSTS_ON, /* dsp1_edma */
344 [1] = PWRSTS_ON, /* dsp1_l1 */ 284 [1] = PWRSTS_ON, /* dsp1_l1 */
@@ -354,9 +294,6 @@ static struct powerdomain cam_7xx_pwrdm = {
354 .prcm_partition = DRA7XX_PRM_PARTITION, 294 .prcm_partition = DRA7XX_PRM_PARTITION,
355 .pwrsts = PWRSTS_OFF_ON, 295 .pwrsts = PWRSTS_OFF_ON,
356 .banks = 1, 296 .banks = 1,
357 .pwrsts_mem_ret = {
358 [0] = PWRSTS_OFF_RET, /* vip_bank */
359 },
360 .pwrsts_mem_on = { 297 .pwrsts_mem_on = {
361 [0] = PWRSTS_ON, /* vip_bank */ 298 [0] = PWRSTS_ON, /* vip_bank */
362 }, 299 },
@@ -370,9 +307,6 @@ static struct powerdomain eve4_7xx_pwrdm = {
370 .prcm_partition = DRA7XX_PRM_PARTITION, 307 .prcm_partition = DRA7XX_PRM_PARTITION,
371 .pwrsts = PWRSTS_OFF_ON, 308 .pwrsts = PWRSTS_OFF_ON,
372 .banks = 1, 309 .banks = 1,
373 .pwrsts_mem_ret = {
374 [0] = PWRSTS_OFF_RET, /* eve4_bank */
375 },
376 .pwrsts_mem_on = { 310 .pwrsts_mem_on = {
377 [0] = PWRSTS_ON, /* eve4_bank */ 311 [0] = PWRSTS_ON, /* eve4_bank */
378 }, 312 },
@@ -386,9 +320,6 @@ static struct powerdomain eve2_7xx_pwrdm = {
386 .prcm_partition = DRA7XX_PRM_PARTITION, 320 .prcm_partition = DRA7XX_PRM_PARTITION,
387 .pwrsts = PWRSTS_OFF_ON, 321 .pwrsts = PWRSTS_OFF_ON,
388 .banks = 1, 322 .banks = 1,
389 .pwrsts_mem_ret = {
390 [0] = PWRSTS_OFF_RET, /* eve2_bank */
391 },
392 .pwrsts_mem_on = { 323 .pwrsts_mem_on = {
393 [0] = PWRSTS_ON, /* eve2_bank */ 324 [0] = PWRSTS_ON, /* eve2_bank */
394 }, 325 },
@@ -402,9 +333,6 @@ static struct powerdomain eve1_7xx_pwrdm = {
402 .prcm_partition = DRA7XX_PRM_PARTITION, 333 .prcm_partition = DRA7XX_PRM_PARTITION,
403 .pwrsts = PWRSTS_OFF_ON, 334 .pwrsts = PWRSTS_OFF_ON,
404 .banks = 1, 335 .banks = 1,
405 .pwrsts_mem_ret = {
406 [0] = PWRSTS_OFF_RET, /* eve1_bank */
407 },
408 .pwrsts_mem_on = { 336 .pwrsts_mem_on = {
409 [0] = PWRSTS_ON, /* eve1_bank */ 337 [0] = PWRSTS_ON, /* eve1_bank */
410 }, 338 },
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index 5b385bb8aff9..cb9497a20fb3 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -496,8 +496,7 @@ void __init omap_init_time(void)
496 __omap_sync32k_timer_init(1, "timer_32k_ck", "ti,timer-alwon", 496 __omap_sync32k_timer_init(1, "timer_32k_ck", "ti,timer-alwon",
497 2, "timer_sys_ck", NULL, false); 497 2, "timer_sys_ck", NULL, false);
498 498
499 if (of_have_populated_dt()) 499 clocksource_probe();
500 clocksource_probe();
501} 500}
502 501
503#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_SOC_AM43XX) 502#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_SOC_AM43XX)
@@ -505,6 +504,8 @@ void __init omap3_secure_sync32k_timer_init(void)
505{ 504{
506 __omap_sync32k_timer_init(12, "secure_32k_fck", "ti,timer-secure", 505 __omap_sync32k_timer_init(12, "secure_32k_fck", "ti,timer-secure",
507 2, "timer_sys_ck", NULL, false); 506 2, "timer_sys_ck", NULL, false);
507
508 clocksource_probe();
508} 509}
509#endif /* CONFIG_ARCH_OMAP3 */ 510#endif /* CONFIG_ARCH_OMAP3 */
510 511
@@ -513,6 +514,8 @@ void __init omap3_gptimer_timer_init(void)
513{ 514{
514 __omap_sync32k_timer_init(2, "timer_sys_ck", NULL, 515 __omap_sync32k_timer_init(2, "timer_sys_ck", NULL,
515 1, "timer_sys_ck", "ti,timer-alwon", true); 516 1, "timer_sys_ck", "ti,timer-alwon", true);
517
518 clocksource_probe();
516} 519}
517#endif 520#endif
518 521
diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c
index 5766ce2be32b..8409cab3f760 100644
--- a/arch/arm/mach-vexpress/spc.c
+++ b/arch/arm/mach-vexpress/spc.c
@@ -547,7 +547,7 @@ static struct clk *ve_spc_clk_register(struct device *cpu_dev)
547 547
548 init.name = dev_name(cpu_dev); 548 init.name = dev_name(cpu_dev);
549 init.ops = &clk_spc_ops; 549 init.ops = &clk_spc_ops;
550 init.flags = CLK_IS_ROOT | CLK_GET_RATE_NOCACHE; 550 init.flags = CLK_GET_RATE_NOCACHE;
551 init.num_parents = 0; 551 init.num_parents = 0;
552 552
553 return devm_clk_register(cpu_dev, &spc->hw); 553 return devm_clk_register(cpu_dev, &spc->hw);
diff --git a/arch/arm/plat-samsung/devs.c b/arch/arm/plat-samsung/devs.c
index 84baa16f4c0b..e93aa6734147 100644
--- a/arch/arm/plat-samsung/devs.c
+++ b/arch/arm/plat-samsung/devs.c
@@ -68,7 +68,7 @@
68#include <linux/platform_data/asoc-s3c.h> 68#include <linux/platform_data/asoc-s3c.h>
69#include <linux/platform_data/spi-s3c64xx.h> 69#include <linux/platform_data/spi-s3c64xx.h>
70 70
71static u64 samsung_device_dma_mask = DMA_BIT_MASK(32); 71#define samsung_device_dma_mask (*((u64[]) { DMA_BIT_MASK(32) }))
72 72
73/* AC97 */ 73/* AC97 */
74#ifdef CONFIG_CPU_S3C2440 74#ifdef CONFIG_CPU_S3C2440
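
Whatever the driver-level motivation here, the compound-literal macro above has one notable property: each macro expansion yields its own writable u64 object, so a device writing through its dma_mask pointer cannot clobber a mask shared by every other device, which a single static u64 allowed. A standalone sketch of the semantics (userspace, names local to the example):

    #include <stdint.h>
    #include <stdio.h>

    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))
    #define samsung_device_dma_mask (*((uint64_t[]) { DMA_BIT_MASK(32) }))

    int main(void)
    {
        uint64_t *a = &samsung_device_dma_mask; /* object #1 */
        uint64_t *b = &samsung_device_dma_mask; /* object #2 */

        printf("distinct objects: %s\n", a != b ? "yes" : "no");
        printf("mask = %#llx\n", (unsigned long long)*a);
        return 0;
    }
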
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 76747d92bc72..5a0a691d4220 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -113,6 +113,18 @@ config ARCH_PHYS_ADDR_T_64BIT
113config MMU 113config MMU
114 def_bool y 114 def_bool y
115 115
116config ARM64_PAGE_SHIFT
117 int
118 default 16 if ARM64_64K_PAGES
119 default 14 if ARM64_16K_PAGES
120 default 12
121
122config ARM64_CONT_SHIFT
123 int
124 default 5 if ARM64_64K_PAGES
125 default 7 if ARM64_16K_PAGES
126 default 4
127
116config ARCH_MMAP_RND_BITS_MIN 128config ARCH_MMAP_RND_BITS_MIN
117 default 14 if ARM64_64K_PAGES 129 default 14 if ARM64_64K_PAGES
118 default 16 if ARM64_16K_PAGES 130 default 16 if ARM64_16K_PAGES
@@ -426,6 +438,15 @@ config CAVIUM_ERRATUM_22375
426 438
427 If unsure, say Y. 439 If unsure, say Y.
428 440
441config CAVIUM_ERRATUM_23144
442 bool "Cavium erratum 23144: ITS SYNC hang on dual socket system"
443 depends on NUMA
444 default y
445 help
446 The ITS SYNC command can hang for cross-node I/O and collections/CPU mappings.
447
448 If unsure, say Y.
449
429config CAVIUM_ERRATUM_23154 450config CAVIUM_ERRATUM_23154
430 bool "Cavium erratum 23154: Access to ICC_IAR1_EL1 is not sync'ed" 451 bool "Cavium erratum 23154: Access to ICC_IAR1_EL1 is not sync'ed"
431 default y 452 default y
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index 710fde4ad0f0..0cc758cdd0dc 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -12,7 +12,8 @@ config ARM64_PTDUMP
12 who are working in architecture specific areas of the kernel. 12 who are working in architecture specific areas of the kernel.
13 It is probably not a good idea to enable this feature in a production 13 It is probably not a good idea to enable this feature in a production
14 kernel. 14 kernel.
15 If in doubt, say "N" 15
16 If in doubt, say N.
16 17
17config PID_IN_CONTEXTIDR 18config PID_IN_CONTEXTIDR
18 bool "Write the current PID to the CONTEXTIDR register" 19 bool "Write the current PID to the CONTEXTIDR register"
@@ -38,15 +39,15 @@ config ARM64_RANDOMIZE_TEXT_OFFSET
38 value. 39 value.
39 40
40config DEBUG_SET_MODULE_RONX 41config DEBUG_SET_MODULE_RONX
41 bool "Set loadable kernel module data as NX and text as RO" 42 bool "Set loadable kernel module data as NX and text as RO"
42 depends on MODULES 43 depends on MODULES
43 help 44 default y
44 This option helps catch unintended modifications to loadable 45 help
45 kernel module's text and read-only data. It also prevents execution 46 Is this is set, kernel module text and rodata will be made read-only.
46 of module data. Such protection may interfere with run-time code 47 This is to help catch accidental or malicious attempts to change the
47 patching and dynamic kernel tracing - and they might also protect 48 kernel's executable code.
48 against certain classes of kernel exploits. 49
49 If in doubt, say "N". 50 If in doubt, say Y.
50 51
51config DEBUG_RODATA 52config DEBUG_RODATA
52 bool "Make kernel text and rodata read-only" 53 bool "Make kernel text and rodata read-only"
@@ -56,7 +57,7 @@ config DEBUG_RODATA
56 is to help catch accidental or malicious attempts to change the 57 is to help catch accidental or malicious attempts to change the
57 kernel's executable code. 58 kernel's executable code.
58 59
59 If in doubt, say Y 60 If in doubt, say Y.
60 61
61config DEBUG_ALIGN_RODATA 62config DEBUG_ALIGN_RODATA
62 depends on DEBUG_RODATA 63 depends on DEBUG_RODATA
@@ -69,7 +70,7 @@ config DEBUG_ALIGN_RODATA
69 alignment and potentially wasted space. Turn on this option if 70 alignment and potentially wasted space. Turn on this option if
70 performance is more important than memory pressure. 71 performance is more important than memory pressure.
71 72
72 If in doubt, say N 73 If in doubt, say N.
73 74
74source "drivers/hwtracing/coresight/Kconfig" 75source "drivers/hwtracing/coresight/Kconfig"
75 76
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 354d75402ace..648a32c89541 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -60,7 +60,9 @@ head-y := arch/arm64/kernel/head.o
60 60
61# The byte offset of the kernel image in RAM from the start of RAM. 61# The byte offset of the kernel image in RAM from the start of RAM.
62ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y) 62ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y)
63TEXT_OFFSET := $(shell awk 'BEGIN {srand(); printf "0x%03x000\n", int(512 * rand())}') 63TEXT_OFFSET := $(shell awk "BEGIN {srand(); printf \"0x%06x\n\", \
64 int(2 * 1024 * 1024 / (2 ^ $(CONFIG_ARM64_PAGE_SHIFT)) * \
65 rand()) * (2 ^ $(CONFIG_ARM64_PAGE_SHIFT))}")
64else 66else
65TEXT_OFFSET := 0x00080000 67TEXT_OFFSET := 0x00080000
66endif 68endif
@@ -93,7 +95,7 @@ boot := arch/arm64/boot
93Image: vmlinux 95Image: vmlinux
94 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ 96 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
95 97
96Image.%: vmlinux 98Image.%: Image
97 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ 99 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
98 100
99zinstall install: 101zinstall install:
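
The reworked TEXT_OFFSET expression keeps the randomised offset inside a 2MiB window while rounding it down to a whole number of pages, so it stays aligned for the 16K/64K granules now selectable via CONFIG_ARM64_PAGE_SHIFT. A quick standalone model of the arithmetic (64K pages assumed for illustration):

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    int main(void)
    {
        unsigned long page = 1UL << 16;          /* assume 64K pages */
        unsigned long span = 2 * 1024 * 1024;    /* 2MiB window */
        unsigned long off;

        srand((unsigned)time(NULL));
        /* random page index in [0, span/page), then scale back up */
        off = (unsigned long)(span / page * (rand() / (RAND_MAX + 1.0)));
        off *= page;                             /* whole pages only */
        printf("TEXT_OFFSET := 0x%06lx\n", off);
        return 0;
    }
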
diff --git a/arch/arm64/boot/dts/lg/lg1312.dtsi b/arch/arm64/boot/dts/lg/lg1312.dtsi
index 3a4e9a2ab313..fbafa24cd533 100644
--- a/arch/arm64/boot/dts/lg/lg1312.dtsi
+++ b/arch/arm64/boot/dts/lg/lg1312.dtsi
@@ -125,7 +125,7 @@
125 #size-cells = <1>; 125 #size-cells = <1>;
126 #interrupts-cells = <3>; 126 #interrupts-cells = <3>;
127 127
128 compatible = "arm,amba-bus"; 128 compatible = "simple-bus";
129 interrupt-parent = <&gic>; 129 interrupt-parent = <&gic>;
130 ranges; 130 ranges;
131 131
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index 46f325a143b0..d7f8e06910bc 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -163,7 +163,7 @@
163 }; 163 };
164 164
165 amba { 165 amba {
166 compatible = "arm,amba-bus"; 166 compatible = "simple-bus";
167 #address-cells = <2>; 167 #address-cells = <2>;
168 #size-cells = <2>; 168 #size-cells = <2>;
169 ranges; 169 ranges;
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 7a09c48c0475..579b6e654f2d 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -160,14 +160,14 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
160#define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12)) 160#define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12))
161#endif 161#endif
162 162
163#ifdef CONFIG_COMPAT
164
165#ifdef __AARCH64EB__ 163#ifdef __AARCH64EB__
166#define COMPAT_ELF_PLATFORM ("v8b") 164#define COMPAT_ELF_PLATFORM ("v8b")
167#else 165#else
168#define COMPAT_ELF_PLATFORM ("v8l") 166#define COMPAT_ELF_PLATFORM ("v8l")
169#endif 167#endif
170 168
169#ifdef CONFIG_COMPAT
170
171#define COMPAT_ELF_ET_DYN_BASE (2 * TASK_SIZE_32 / 3) 171#define COMPAT_ELF_ET_DYN_BASE (2 * TASK_SIZE_32 / 3)
172 172
173/* AArch32 registers. */ 173/* AArch32 registers. */
diff --git a/arch/arm64/include/asm/kgdb.h b/arch/arm64/include/asm/kgdb.h
index f69f69c8120c..da84645525b9 100644
--- a/arch/arm64/include/asm/kgdb.h
+++ b/arch/arm64/include/asm/kgdb.h
@@ -38,25 +38,54 @@ extern int kgdb_fault_expected;
38#endif /* !__ASSEMBLY__ */ 38#endif /* !__ASSEMBLY__ */
39 39
40/* 40/*
41 * gdb is expecting the following registers layout. 41 * gdb remote protocol (well most versions of it) expects the following
42 * register layout.
42 * 43 *
43 * General purpose regs: 44 * General purpose regs:
44 * r0-r30: 64 bit 45 * r0-r30: 64 bit
45 * sp,pc : 64 bit 46 * sp,pc : 64 bit
46 * pstate : 64 bit 47 * pstate : 32 bit
47 * Total: 34 48 * Total: 33 + 1
48 * FPU regs: 49 * FPU regs:
49 * f0-f31: 128 bit 50 * f0-f31: 128 bit
50 * Total: 32
51 * Extra regs
52 * fpsr & fpcr: 32 bit 51 * fpsr & fpcr: 32 bit
53 * Total: 2 52 * Total: 32 + 2
54 * 53 *
54 * To expand a little on the "most versions of it"... when the gdb remote
55 * protocol for AArch64 was developed it depended on a statement in the
56 * Architecture Reference Manual that claimed "SPSR_ELx is a 32-bit register"
57 * and, as a result, allocated only 32-bits for the PSTATE in the remote
58 * protocol. In fact this statement is still present in ARM DDI 0487A.i.
59 *
60 * Unfortunately "is a 32-bit register" has a very special meaning for
61 * system registers. It means that "the upper bits, bits[63:32], are
62 * RES0.". RES0 is heavily used in the ARM architecture documents as a
63 * way to leave space for future architecture changes. So to translate a
64 * little for people who don't spend their spare time reading ARM architecture
65 * manuals, what "is a 32-bit register" actually means in this context is
66 * "is a 64-bit register but one with no meaning allocated to any of the
67 * upper 32-bits... *yet*".
68 *
69 * Perhaps then we should not be surprised that this has led to some
70 * confusion. Specifically a patch, influenced by the above translation,
71 * that extended PSTATE to 64-bit was accepted into gdb-7.7 but the patch
72 * was reverted in gdb-7.8.1 and all later releases, when this was
73 * discovered to be an undocumented protocol change.
74 *
75 * So... it is *not* wrong for us to only allocate 32-bits to PSTATE
76 * here even though the kernel itself allocates 64-bits for the same
77 * state. That is because this bit of code tells the kernel how the gdb
78 * remote protocol (well most versions of it) describes the register state.
79 *
80 * Note that if you are using one of the versions of gdb that supports
81 * the gdb-7.7 version of the protocol you cannot use kgdb directly
82 * without providing a custom register description (gdb can load new
83 * protocol descriptions at runtime).
55 */ 84 */
56 85
57#define _GP_REGS 34 86#define _GP_REGS 33
58#define _FP_REGS 32 87#define _FP_REGS 32
59#define _EXTRA_REGS 2 88#define _EXTRA_REGS 3
60/* 89/*
61 * general purpose registers size in bytes. 90 * general purpose registers size in bytes.
62 * pstate is only 4 bytes. subtract 4 bytes 91 * pstate is only 4 bytes. subtract 4 bytes
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 72a3025bb583..31b73227b41f 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -55,8 +55,9 @@
55#define VMEMMAP_SIZE (UL(1) << (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)) 55#define VMEMMAP_SIZE (UL(1) << (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT))
56 56
57/* 57/*
58 * PAGE_OFFSET - the virtual address of the start of the kernel image (top 58 * PAGE_OFFSET - the virtual address of the start of the linear map (top
59 * (VA_BITS - 1)) 59 * (VA_BITS - 1))
60 * KIMAGE_VADDR - the virtual address of the start of the kernel image
60 * VA_BITS - the maximum number of bits for virtual addresses. 61 * VA_BITS - the maximum number of bits for virtual addresses.
61 * VA_START - the first kernel virtual address. 62 * VA_START - the first kernel virtual address.
62 * TASK_SIZE - the maximum size of a user space task. 63 * TASK_SIZE - the maximum size of a user space task.
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index 17b45f7d96d3..8472c6def5ef 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -23,16 +23,8 @@
23 23
24/* PAGE_SHIFT determines the page size */ 24/* PAGE_SHIFT determines the page size */
25/* CONT_SHIFT determines the number of pages which can be tracked together */ 25/* CONT_SHIFT determines the number of pages which can be tracked together */
26#ifdef CONFIG_ARM64_64K_PAGES 26#define PAGE_SHIFT CONFIG_ARM64_PAGE_SHIFT
27#define PAGE_SHIFT 16 27#define CONT_SHIFT CONFIG_ARM64_CONT_SHIFT
28#define CONT_SHIFT 5
29#elif defined(CONFIG_ARM64_16K_PAGES)
30#define PAGE_SHIFT 14
31#define CONT_SHIFT 7
32#else
33#define PAGE_SHIFT 12
34#define CONT_SHIFT 4
35#endif
36#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) 28#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
37#define PAGE_MASK (~(PAGE_SIZE-1)) 29#define PAGE_MASK (~(PAGE_SIZE-1))
38 30
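
With the #ifdef ladder gone, PAGE_SHIFT and CONT_SHIFT come straight from the ARM64_PAGE_SHIFT/ARM64_CONT_SHIFT Kconfig entries added earlier in this series. A standalone check of the derived constants for the three granule configurations:

    #include <stdio.h>

    static void show(int page_shift, int cont_shift)
    {
        unsigned long page_size = 1UL << page_shift;
        unsigned long page_mask = ~(page_size - 1);
        unsigned long cont_size = page_size << cont_shift;

        printf("PAGE_SHIFT=%2d PAGE_SIZE=%4luK PAGE_MASK=%#lx CONT span=%luK\n",
               page_shift, page_size >> 10, page_mask, cont_size >> 10);
    }

    int main(void)
    {
        show(12, 4);    /* 4K pages, 16 contiguous ptes -> 64K */
        show(14, 7);    /* 16K pages, 128 contiguous ptes -> 2M */
        show(16, 5);    /* 64K pages, 32 contiguous ptes -> 2M */
        return 0;
    }
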
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index ff98585d085a..d25f4f137c2a 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -26,7 +26,7 @@
26 26
27#define check_pgt_cache() do { } while (0) 27#define check_pgt_cache() do { } while (0)
28 28
29#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO) 29#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
30#define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t)) 30#define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
31 31
32#if CONFIG_PGTABLE_LEVELS > 2 32#if CONFIG_PGTABLE_LEVELS > 2
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 433e50405274..022644704a93 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -124,6 +124,18 @@ static inline void cpu_panic_kernel(void)
124 cpu_park_loop(); 124 cpu_park_loop();
125} 125}
126 126
127/*
128 * If a secondary CPU enters the kernel but fails to come online
129 * (e.g. due to mismatched features), and cannot exit the kernel,
130 * we increment cpus_stuck_in_kernel and leave the CPU in a
131 * quiescent loop within the kernel text. The memory containing
132 * this loop must not be re-used for anything else as the 'stuck'
133 * core is executing it.
134 *
135 * This function is used to inhibit features like kexec and hibernate.
136 */
137bool cpus_are_stuck_in_kernel(void);
138
127#endif /* ifndef __ASSEMBLY__ */ 139#endif /* ifndef __ASSEMBLY__ */
128 140
129#endif /* ifndef __ASM_SMP_H */ 141#endif /* ifndef __ASM_SMP_H */
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index fc9682bfe002..e875a5a551d7 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -30,22 +30,53 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
30{ 30{
31 unsigned int tmp; 31 unsigned int tmp;
32 arch_spinlock_t lockval; 32 arch_spinlock_t lockval;
33 u32 owner;
34
35 /*
36 * Ensure prior spin_lock operations to other locks have completed
37 * on this CPU before we test whether "lock" is locked.
38 */
39 smp_mb();
40 owner = READ_ONCE(lock->owner) << 16;
33 41
34 asm volatile( 42 asm volatile(
35" sevl\n" 43" sevl\n"
36"1: wfe\n" 44"1: wfe\n"
37"2: ldaxr %w0, %2\n" 45"2: ldaxr %w0, %2\n"
46 /* Is the lock free? */
38" eor %w1, %w0, %w0, ror #16\n" 47" eor %w1, %w0, %w0, ror #16\n"
39" cbnz %w1, 1b\n" 48" cbz %w1, 3f\n"
49 /* Lock taken -- has there been a subsequent unlock->lock transition? */
50" eor %w1, %w3, %w0, lsl #16\n"
51" cbz %w1, 1b\n"
52 /*
53 * The owner has been updated, so there was an unlock->lock
54 * transition that we missed. That means we can rely on the
55 * store-release of the unlock operation paired with the
56 * load-acquire of the lock operation to publish any of our
57 * previous stores to the new lock owner and therefore don't
58 * need to bother with the writeback below.
59 */
60" b 4f\n"
61"3:\n"
62 /*
63 * Serialise against any concurrent lockers by writing back the
64 * unlocked lock value
65 */
40 ARM64_LSE_ATOMIC_INSN( 66 ARM64_LSE_ATOMIC_INSN(
41 /* LL/SC */ 67 /* LL/SC */
42" stxr %w1, %w0, %2\n" 68" stxr %w1, %w0, %2\n"
43" cbnz %w1, 2b\n", /* Serialise against any concurrent lockers */
44 /* LSE atomics */
45" nop\n" 69" nop\n"
46" nop\n") 70" nop\n",
71 /* LSE atomics */
72" mov %w1, %w0\n"
73" cas %w0, %w0, %2\n"
74" eor %w1, %w1, %w0\n")
75 /* Somebody else wrote to the lock; go back to 2 and reload the value */
76" cbnz %w1, 2b\n"
77"4:"
47 : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock) 78 : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
48 : 79 : "r" (owner)
49 : "memory"); 80 : "memory");
50} 81}
51 82
@@ -148,6 +179,7 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
148 179
149static inline int arch_spin_is_locked(arch_spinlock_t *lock) 180static inline int arch_spin_is_locked(arch_spinlock_t *lock)
150{ 181{
182 smp_mb(); /* See arch_spin_unlock_wait */
151 return !arch_spin_value_unlocked(READ_ONCE(*lock)); 183 return !arch_spin_value_unlocked(READ_ONCE(*lock));
152} 184}
153 185
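
A plain-C model of the new arch_spin_unlock_wait() logic may help: snapshot the owner field, then either (a) observe the lock free and write the unlocked value back to serialise against concurrent lockers, or (b) observe the owner change, i.e. an unlock->lock transition whose release/acquire pairing publishes our prior stores. This is an illustrative userspace sketch with a 16-bit ticket layout, not the kernel implementation:

    #include <stdatomic.h>
    #include <stdint.h>

    static inline int lock_is_free(uint32_t v)
    {
        return (v >> 16) == (v & 0xffff);   /* next == owner */
    }

    void spin_unlock_wait(_Atomic uint32_t *lock)
    {
        uint32_t owner, v;

        /* mirrors the smp_mb() added before reading lock->owner */
        atomic_thread_fence(memory_order_seq_cst);
        owner = atomic_load(lock) & 0xffff;

        for (;;) {
            v = atomic_load(lock);
            if (lock_is_free(v)) {
                /* serialise against concurrent lockers by writing
                 * back the unlocked value (stxr/cas in the asm) */
                if (atomic_compare_exchange_strong(lock, &v, v))
                    return;
                continue;   /* lost the race: reload and retry */
            }
            if ((v & 0xffff) != owner)
                return;     /* missed an unlock->lock transition */
            /* still held by the same owner: spin (wfe in the asm) */
        }
    }
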
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 0685d74572af..9e397a542756 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -81,19 +81,6 @@ static inline void set_fs(mm_segment_t fs)
81#define segment_eq(a, b) ((a) == (b)) 81#define segment_eq(a, b) ((a) == (b))
82 82
83/* 83/*
84 * Return 1 if addr < current->addr_limit, 0 otherwise.
85 */
86#define __addr_ok(addr) \
87({ \
88 unsigned long flag; \
89 asm("cmp %1, %0; cset %0, lo" \
90 : "=&r" (flag) \
91 : "r" (addr), "0" (current_thread_info()->addr_limit) \
92 : "cc"); \
93 flag; \
94})
95
96/*
97 * Test whether a block of memory is a valid user space address. 84 * Test whether a block of memory is a valid user space address.
98 * Returns 1 if the range is valid, 0 otherwise. 85 * Returns 1 if the range is valid, 0 otherwise.
99 * 86 *
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 41e58fe3c041..e78ac26324bd 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -44,7 +44,7 @@
44#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2) 44#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2)
45#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5) 45#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5)
46 46
47#define __NR_compat_syscalls 390 47#define __NR_compat_syscalls 394
48#endif 48#endif
49 49
50#define __ARCH_WANT_SYS_CLONE 50#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 5b925b761a2a..b7e8ef16ff0d 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -801,6 +801,14 @@ __SYSCALL(__NR_execveat, compat_sys_execveat)
801__SYSCALL(__NR_userfaultfd, sys_userfaultfd) 801__SYSCALL(__NR_userfaultfd, sys_userfaultfd)
802#define __NR_membarrier 389 802#define __NR_membarrier 389
803__SYSCALL(__NR_membarrier, sys_membarrier) 803__SYSCALL(__NR_membarrier, sys_membarrier)
804#define __NR_mlock2 390
805__SYSCALL(__NR_mlock2, sys_mlock2)
806#define __NR_copy_file_range 391
807__SYSCALL(__NR_copy_file_range, sys_copy_file_range)
808#define __NR_preadv2 392
809__SYSCALL(__NR_preadv2, compat_sys_preadv2)
810#define __NR_pwritev2 393
811__SYSCALL(__NR_pwritev2, compat_sys_pwritev2)
804 812
805/* 813/*
806 * Please add new compat syscalls above this comment and update 814 * Please add new compat syscalls above this comment and update
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 3808470486f3..c173d329397f 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -22,6 +22,8 @@
22 22
23#include <linux/bitops.h> 23#include <linux/bitops.h>
24#include <linux/bug.h> 24#include <linux/bug.h>
25#include <linux/compat.h>
26#include <linux/elf.h>
25#include <linux/init.h> 27#include <linux/init.h>
26#include <linux/kernel.h> 28#include <linux/kernel.h>
27#include <linux/personality.h> 29#include <linux/personality.h>
@@ -104,6 +106,7 @@ static const char *const compat_hwcap2_str[] = {
104static int c_show(struct seq_file *m, void *v) 106static int c_show(struct seq_file *m, void *v)
105{ 107{
106 int i, j; 108 int i, j;
109 bool compat = personality(current->personality) == PER_LINUX32;
107 110
108 for_each_online_cpu(i) { 111 for_each_online_cpu(i) {
109 struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i); 112 struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
@@ -115,6 +118,9 @@ static int c_show(struct seq_file *m, void *v)
115 * "processor". Give glibc what it expects. 118 * "processor". Give glibc what it expects.
116 */ 119 */
117 seq_printf(m, "processor\t: %d\n", i); 120 seq_printf(m, "processor\t: %d\n", i);
121 if (compat)
122 seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n",
123 MIDR_REVISION(midr), COMPAT_ELF_PLATFORM);
118 124
119 seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", 125 seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
120 loops_per_jiffy / (500000UL/HZ), 126 loops_per_jiffy / (500000UL/HZ),
@@ -127,7 +133,7 @@ static int c_show(struct seq_file *m, void *v)
127 * software which does already (at least for 32-bit). 133 * software which does already (at least for 32-bit).
128 */ 134 */
129 seq_puts(m, "Features\t:"); 135 seq_puts(m, "Features\t:");
130 if (personality(current->personality) == PER_LINUX32) { 136 if (compat) {
131#ifdef CONFIG_COMPAT 137#ifdef CONFIG_COMPAT
132 for (j = 0; compat_hwcap_str[j]; j++) 138 for (j = 0; compat_hwcap_str[j]; j++)
133 if (compat_elf_hwcap & (1 << j)) 139 if (compat_elf_hwcap & (1 << j))
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index f8df75d740f4..21ab5df9fa76 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -33,6 +33,7 @@
33#include <asm/pgtable.h> 33#include <asm/pgtable.h>
34#include <asm/pgtable-hwdef.h> 34#include <asm/pgtable-hwdef.h>
35#include <asm/sections.h> 35#include <asm/sections.h>
36#include <asm/smp.h>
36#include <asm/suspend.h> 37#include <asm/suspend.h>
37#include <asm/virt.h> 38#include <asm/virt.h>
38 39
@@ -236,6 +237,11 @@ int swsusp_arch_suspend(void)
236 unsigned long flags; 237 unsigned long flags;
237 struct sleep_stack_data state; 238 struct sleep_stack_data state;
238 239
240 if (cpus_are_stuck_in_kernel()) {
241 pr_err("Can't hibernate: no mechanism to offline secondary CPUs.\n");
242 return -EBUSY;
243 }
244
239 local_dbg_save(flags); 245 local_dbg_save(flags);
240 246
241 if (__cpu_suspend_enter(&state)) { 247 if (__cpu_suspend_enter(&state)) {
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
index b67531a13136..b5f063e5eff7 100644
--- a/arch/arm64/kernel/kgdb.c
+++ b/arch/arm64/kernel/kgdb.c
@@ -58,7 +58,17 @@ struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
58 { "x30", 8, offsetof(struct pt_regs, regs[30])}, 58 { "x30", 8, offsetof(struct pt_regs, regs[30])},
59 { "sp", 8, offsetof(struct pt_regs, sp)}, 59 { "sp", 8, offsetof(struct pt_regs, sp)},
60 { "pc", 8, offsetof(struct pt_regs, pc)}, 60 { "pc", 8, offsetof(struct pt_regs, pc)},
61 { "pstate", 8, offsetof(struct pt_regs, pstate)}, 61 /*
62 * struct pt_regs thinks PSTATE is 64-bits wide but gdb remote
63 * protocol disagrees. Therefore we must extract only the lower
64 * 32-bits. Look for the big comment in asm/kgdb.h for more
65 * detail.
66 */
67 { "pstate", 4, offsetof(struct pt_regs, pstate)
68#ifdef CONFIG_CPU_BIG_ENDIAN
69 + 4
70#endif
71 },
62 { "v0", 16, -1 }, 72 { "v0", 16, -1 },
63 { "v1", 16, -1 }, 73 { "v1", 16, -1 },
64 { "v2", 16, -1 }, 74 { "v2", 16, -1 },
@@ -128,6 +138,8 @@ sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
128 memset((char *)gdb_regs, 0, NUMREGBYTES); 138 memset((char *)gdb_regs, 0, NUMREGBYTES);
129 thread_regs = task_pt_regs(task); 139 thread_regs = task_pt_regs(task);
130 memcpy((void *)gdb_regs, (void *)thread_regs->regs, GP_REG_BYTES); 140 memcpy((void *)gdb_regs, (void *)thread_regs->regs, GP_REG_BYTES);
141 /* Special case for PSTATE (check comments in asm/kgdb.h for details) */
142 dbg_get_reg(33, gdb_regs + GP_REG_BYTES, thread_regs);
131} 143}
132 144
133void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc) 145void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
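
The "+ 4" under CONFIG_CPU_BIG_ENDIAN above reflects where the low word of a 64-bit value lives in memory: byte offset 0 on little-endian, byte offset 4 on big-endian. A standalone demonstration:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        uint64_t pstate = 0x00000001800003c5ULL; /* arbitrary value */
        uint32_t low;

        /* read the first four bytes of the u64's representation */
        memcpy(&low, &pstate, sizeof(low));
        if (low == (uint32_t)pstate)
            printf("little-endian: low word at offset 0\n");
        else
            printf("big-endian: low word at offset 4\n");
        return 0;
    }
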
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 678e0842cb3b..62ff3c0622e2 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -909,3 +909,21 @@ int setup_profiling_timer(unsigned int multiplier)
909{ 909{
910 return -EINVAL; 910 return -EINVAL;
911} 911}
912
913static bool have_cpu_die(void)
914{
915#ifdef CONFIG_HOTPLUG_CPU
916 int any_cpu = raw_smp_processor_id();
917
918 if (cpu_ops[any_cpu]->cpu_die)
919 return true;
920#endif
921 return false;
922}
923
924bool cpus_are_stuck_in_kernel(void)
925{
926 bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die());
927
928 return !!cpus_stuck_in_kernel || smp_spin_tables;
929}
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index c5392081b49b..2a43012616b7 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -64,8 +64,7 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
64 64
65 /* 65 /*
66 * We need to switch to kernel mode so that we can use __get_user 66 * We need to switch to kernel mode so that we can use __get_user
67 * to safely read from kernel space. Note that we now dump the 67 * to safely read from kernel space.
68 * code first, just in case the backtrace kills us.
69 */ 68 */
70 fs = get_fs(); 69 fs = get_fs();
71 set_fs(KERNEL_DS); 70 set_fs(KERNEL_DS);
@@ -111,21 +110,12 @@ static void dump_backtrace_entry(unsigned long where)
111 print_ip_sym(where); 110 print_ip_sym(where);
112} 111}
113 112
114static void dump_instr(const char *lvl, struct pt_regs *regs) 113static void __dump_instr(const char *lvl, struct pt_regs *regs)
115{ 114{
116 unsigned long addr = instruction_pointer(regs); 115 unsigned long addr = instruction_pointer(regs);
117 mm_segment_t fs;
118 char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str; 116 char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
119 int i; 117 int i;
120 118
121 /*
122 * We need to switch to kernel mode so that we can use __get_user
123 * to safely read from kernel space. Note that we now dump the
124 * code first, just in case the backtrace kills us.
125 */
126 fs = get_fs();
127 set_fs(KERNEL_DS);
128
129 for (i = -4; i < 1; i++) { 119 for (i = -4; i < 1; i++) {
130 unsigned int val, bad; 120 unsigned int val, bad;
131 121
@@ -139,8 +129,18 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
139 } 129 }
140 } 130 }
141 printk("%sCode: %s\n", lvl, str); 131 printk("%sCode: %s\n", lvl, str);
132}
142 133
143 set_fs(fs); 134static void dump_instr(const char *lvl, struct pt_regs *regs)
135{
136 if (!user_mode(regs)) {
137 mm_segment_t fs = get_fs();
138 set_fs(KERNEL_DS);
139 __dump_instr(lvl, regs);
140 set_fs(fs);
141 } else {
142 __dump_instr(lvl, regs);
143 }
144} 144}
145 145
146static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) 146static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
@@ -477,8 +477,9 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
477 void __user *pc = (void __user *)instruction_pointer(regs); 477 void __user *pc = (void __user *)instruction_pointer(regs);
478 console_verbose(); 478 console_verbose();
479 479
480 pr_crit("Bad mode in %s handler detected, code 0x%08x -- %s\n", 480 pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
481 handler[reason], esr, esr_get_class_string(esr)); 481 handler[reason], smp_processor_id(), esr,
482 esr_get_class_string(esr));
482 __show_regs(regs); 483 __show_regs(regs);
483 484
484 info.si_signo = SIGILL; 485 info.si_signo = SIGILL;
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index fff7cd42b3a3..5f8f80b4a224 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -169,7 +169,8 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
169 * Make sure stores to the GIC via the memory mapped interface 169 * Make sure stores to the GIC via the memory mapped interface
170 * are now visible to the system register interface. 170 * are now visible to the system register interface.
171 */ 171 */
172 dsb(st); 172 if (!cpu_if->vgic_sre)
173 dsb(st);
173 174
174 cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2); 175 cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
175 176
@@ -190,12 +191,11 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
190 if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i))) 191 if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
191 continue; 192 continue;
192 193
193 if (cpu_if->vgic_elrsr & (1 << i)) { 194 if (cpu_if->vgic_elrsr & (1 << i))
194 cpu_if->vgic_lr[i] &= ~ICH_LR_STATE; 195 cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
195 continue; 196 else
196 } 197 cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);
197 198
198 cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);
199 __gic_v3_set_lr(0, i); 199 __gic_v3_set_lr(0, i);
200 } 200 }
201 201
@@ -236,8 +236,12 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
236 236
237 val = read_gicreg(ICC_SRE_EL2); 237 val = read_gicreg(ICC_SRE_EL2);
238 write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2); 238 write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);
239 isb(); /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */ 239
240 write_gicreg(1, ICC_SRE_EL1); 240 if (!cpu_if->vgic_sre) {
241 /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
242 isb();
243 write_gicreg(1, ICC_SRE_EL1);
244 }
241} 245}
242 246
243void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu) 247void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
@@ -256,8 +260,10 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
256 * been actually programmed with the value we want before 260 * been actually programmed with the value we want before
257 * starting to mess with the rest of the GIC. 261 * starting to mess with the rest of the GIC.
258 */ 262 */
259 write_gicreg(cpu_if->vgic_sre, ICC_SRE_EL1); 263 if (!cpu_if->vgic_sre) {
260 isb(); 264 write_gicreg(0, ICC_SRE_EL1);
265 isb();
266 }
261 267
262 val = read_gicreg(ICH_VTR_EL2); 268 val = read_gicreg(ICH_VTR_EL2);
263 max_lr_idx = vtr_to_max_lr_idx(val); 269 max_lr_idx = vtr_to_max_lr_idx(val);
@@ -306,18 +312,18 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
306 * (re)distributors. This ensure the guest will read the 312 * (re)distributors. This ensure the guest will read the
307 * correct values from the memory-mapped interface. 313 * correct values from the memory-mapped interface.
308 */ 314 */
309 isb(); 315 if (!cpu_if->vgic_sre) {
310 dsb(sy); 316 isb();
317 dsb(sy);
318 }
311 vcpu->arch.vgic_cpu.live_lrs = live_lrs; 319 vcpu->arch.vgic_cpu.live_lrs = live_lrs;
312 320
313 /* 321 /*
314 * Prevent the guest from touching the GIC system registers if 322 * Prevent the guest from touching the GIC system registers if
315 * SRE isn't enabled for GICv3 emulation. 323 * SRE isn't enabled for GICv3 emulation.
316 */ 324 */
317 if (!cpu_if->vgic_sre) { 325 write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
318 write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE, 326 ICC_SRE_EL2);
319 ICC_SRE_EL2);
320 }
321} 327}
322 328
323void __hyp_text __vgic_v3_init_lrs(void) 329void __hyp_text __vgic_v3_init_lrs(void)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 7bbe3ff02602..a57d650f552c 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -134,6 +134,17 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu,
134 return true; 134 return true;
135} 135}
136 136
137static bool access_gic_sre(struct kvm_vcpu *vcpu,
138 struct sys_reg_params *p,
139 const struct sys_reg_desc *r)
140{
141 if (p->is_write)
142 return ignore_write(vcpu, p);
143
144 p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
145 return true;
146}
147
137static bool trap_raz_wi(struct kvm_vcpu *vcpu, 148static bool trap_raz_wi(struct kvm_vcpu *vcpu,
138 struct sys_reg_params *p, 149 struct sys_reg_params *p,
139 const struct sys_reg_desc *r) 150 const struct sys_reg_desc *r)
@@ -958,7 +969,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
958 access_gic_sgi }, 969 access_gic_sgi },
959 /* ICC_SRE_EL1 */ 970 /* ICC_SRE_EL1 */
960 { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101), 971 { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
961 trap_raz_wi }, 972 access_gic_sre },
962 973
963 /* CONTEXTIDR_EL1 */ 974 /* CONTEXTIDR_EL1 */
964 { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001), 975 { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index b7b397802088..efcf1f7ef1e4 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -179,7 +179,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
179 &asid_generation); 179 &asid_generation);
180 flush_context(cpu); 180 flush_context(cpu);
181 181
182 /* We have at least 1 ASID per CPU, so this will always succeed */ 182 /* We have more ASIDs than CPUs, so this will always succeed */
183 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1); 183 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
184 184
185set_asid: 185set_asid:
@@ -227,8 +227,11 @@ switch_mm_fastpath:
227static int asids_init(void) 227static int asids_init(void)
228{ 228{
229 asid_bits = get_cpu_asid_bits(); 229 asid_bits = get_cpu_asid_bits();
230 /* If we end up with more CPUs than ASIDs, expect things to crash */ 230 /*
231 WARN_ON(NUM_USER_ASIDS < num_possible_cpus()); 231 * Expect allocation after rollover to fail if we don't have at least
232 * one more ASID than CPUs. ASID #0 is reserved for init_mm.
233 */
234 WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus());
232 atomic64_set(&asid_generation, ASID_FIRST_VERSION); 235 atomic64_set(&asid_generation, ASID_FIRST_VERSION);
233 asid_map = kzalloc(BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(*asid_map), 236 asid_map = kzalloc(BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(*asid_map),
234 GFP_KERNEL); 237 GFP_KERNEL);
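
A worked instance of the tightened WARN_ON: after a rollover every running CPU keeps its active ASID and ASID #0 stays reserved for init_mm, so a fresh allocation is only guaranteed when NUM_USER_ASIDS - 1 exceeds the CPU count. Illustrative numbers:

    #include <stdio.h>

    int main(void)
    {
        unsigned long asid_bits = 8;
        unsigned long num_user_asids = 1UL << asid_bits; /* 256 */
        unsigned long cpus = 256;   /* e.g. a large arm64 system */

        if (num_user_asids - 1 <= cpus)
            printf("WARN: %lu ASIDs cannot cover %lu CPUs after rollover\n",
                   num_user_asids, cpus);
        else
            printf("ok: %lu ASIDs for %lu CPUs\n", num_user_asids, cpus);
        return 0;
    }
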
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index 8404190fe2bd..ccfde237d6e6 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -150,6 +150,7 @@ static const struct prot_bits pte_bits[] = {
150 150
151struct pg_level { 151struct pg_level {
152 const struct prot_bits *bits; 152 const struct prot_bits *bits;
153 const char *name;
153 size_t num; 154 size_t num;
154 u64 mask; 155 u64 mask;
155}; 156};
@@ -157,15 +158,19 @@ struct pg_level {
157static struct pg_level pg_level[] = { 158static struct pg_level pg_level[] = {
158 { 159 {
159 }, { /* pgd */ 160 }, { /* pgd */
161 .name = "PGD",
160 .bits = pte_bits, 162 .bits = pte_bits,
161 .num = ARRAY_SIZE(pte_bits), 163 .num = ARRAY_SIZE(pte_bits),
162 }, { /* pud */ 164 }, { /* pud */
165 .name = (CONFIG_PGTABLE_LEVELS > 3) ? "PUD" : "PGD",
163 .bits = pte_bits, 166 .bits = pte_bits,
164 .num = ARRAY_SIZE(pte_bits), 167 .num = ARRAY_SIZE(pte_bits),
165 }, { /* pmd */ 168 }, { /* pmd */
169 .name = (CONFIG_PGTABLE_LEVELS > 2) ? "PMD" : "PGD",
166 .bits = pte_bits, 170 .bits = pte_bits,
167 .num = ARRAY_SIZE(pte_bits), 171 .num = ARRAY_SIZE(pte_bits),
168 }, { /* pte */ 172 }, { /* pte */
173 .name = "PTE",
169 .bits = pte_bits, 174 .bits = pte_bits,
170 .num = ARRAY_SIZE(pte_bits), 175 .num = ARRAY_SIZE(pte_bits),
171 }, 176 },
@@ -214,7 +219,8 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
214 delta >>= 10; 219 delta >>= 10;
215 unit++; 220 unit++;
216 } 221 }
217 seq_printf(st->seq, "%9lu%c", delta, *unit); 222 seq_printf(st->seq, "%9lu%c %s", delta, *unit,
223 pg_level[st->level].name);
218 if (pg_level[st->level].bits) 224 if (pg_level[st->level].bits)
219 dump_prot(st, pg_level[st->level].bits, 225 dump_prot(st, pg_level[st->level].bits,
220 pg_level[st->level].num); 226 pg_level[st->level].num);
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 5954881a35ac..013e2cbe7924 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -109,7 +109,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
109 * PTE_RDONLY is cleared by default in the asm below, so set it in 109 * PTE_RDONLY is cleared by default in the asm below, so set it in
110 * back if necessary (read-only or clean PTE). 110 * back if necessary (read-only or clean PTE).
111 */ 111 */
112 if (!pte_write(entry) || !dirty) 112 if (!pte_write(entry) || !pte_sw_dirty(entry))
113 pte_val(entry) |= PTE_RDONLY; 113 pte_val(entry) |= PTE_RDONLY;
114 114
115 /* 115 /*
@@ -441,7 +441,7 @@ static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
441 return 1; 441 return 1;
442} 442}
443 443
444static struct fault_info { 444static const struct fault_info {
445 int (*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs); 445 int (*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs);
446 int sig; 446 int sig;
447 int code; 447 int code;
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index dbd12ea8ce68..43a76b07eb32 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -71,10 +71,6 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr)
71{ 71{
72 struct page *page = pte_page(pte); 72 struct page *page = pte_page(pte);
73 73
74 /* no flushing needed for anonymous pages */
75 if (!page_mapping(page))
76 return;
77
78 if (!test_and_set_bit(PG_dcache_clean, &page->flags)) 74 if (!test_and_set_bit(PG_dcache_clean, &page->flags))
79 sync_icache_aliases(page_address(page), 75 sync_icache_aliases(page_address(page),
80 PAGE_SIZE << compound_order(page)); 76 PAGE_SIZE << compound_order(page));
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index aa8aee7d6929..2e49bd252fe7 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -306,6 +306,10 @@ static __init int setup_hugepagesz(char *opt)
306 hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT); 306 hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
307 } else if (ps == PUD_SIZE) { 307 } else if (ps == PUD_SIZE) {
308 hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT); 308 hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
309 } else if (ps == (PAGE_SIZE * CONT_PTES)) {
310 hugetlb_add_hstate(CONT_PTE_SHIFT);
311 } else if (ps == (PMD_SIZE * CONT_PMDS)) {
312 hugetlb_add_hstate((PMD_SHIFT + CONT_PMD_SHIFT) - PAGE_SHIFT);
309 } else { 313 } else {
310 hugetlb_bad_size(); 314 hugetlb_bad_size();
311 pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10); 315 pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
@@ -314,3 +318,13 @@ static __init int setup_hugepagesz(char *opt)
314 return 1; 318 return 1;
315} 319}
316__setup("hugepagesz=", setup_hugepagesz); 320__setup("hugepagesz=", setup_hugepagesz);
321
322#ifdef CONFIG_ARM64_64K_PAGES
323static __init int add_default_hugepagesz(void)
324{
325 if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL)
326 hugetlb_add_hstate(CONT_PMD_SHIFT);
327 return 0;
328}
329arch_initcall(add_default_hugepagesz);
330#endif
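
For reference, the sizes the two new branches (and the 64K-pages initcall) accept, computed per granule; the shift values follow the arm64 contiguous-hint convention and the numbers are illustrative:

    #include <stdio.h>

    static void show(const char *granule, unsigned long page,
                     int cont_ptes, int cont_pmds, unsigned long pmd)
    {
        printf("%s: CONT_PTE span %luK, CONT_PMD span %luM\n", granule,
               page * cont_ptes >> 10, pmd * cont_pmds >> 20);
    }

    int main(void)
    {
        show("4K", 4096UL, 16, 16, 2UL << 20);      /* 64K and 32M */
        show("64K", 65536UL, 32, 32, 512UL << 20);  /* 2M and 16G */
        return 0;
    }
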
diff --git a/arch/avr32/include/asm/pgalloc.h b/arch/avr32/include/asm/pgalloc.h
index 1aba19d68c5e..db039cb368be 100644
--- a/arch/avr32/include/asm/pgalloc.h
+++ b/arch/avr32/include/asm/pgalloc.h
@@ -43,7 +43,7 @@ static inline void pgd_ctor(void *x)
43 */ 43 */
44static inline pgd_t *pgd_alloc(struct mm_struct *mm) 44static inline pgd_t *pgd_alloc(struct mm_struct *mm)
45{ 45{
46 return quicklist_alloc(QUICK_PGD, GFP_KERNEL | __GFP_REPEAT, pgd_ctor); 46 return quicklist_alloc(QUICK_PGD, GFP_KERNEL, pgd_ctor);
47} 47}
48 48
49static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) 49static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
@@ -54,7 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
54static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 54static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
55 unsigned long address) 55 unsigned long address)
56{ 56{
57 return quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL); 57 return quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
58} 58}
59 59
60static inline pgtable_t pte_alloc_one(struct mm_struct *mm, 60static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
@@ -63,7 +63,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
63 struct page *page; 63 struct page *page;
64 void *pg; 64 void *pg;
65 65
66 pg = quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL); 66 pg = quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
67 if (!pg) 67 if (!pg)
68 return NULL; 68 return NULL;
69 69
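
This and the following __GFP_REPEAT removals (cris, frv, hexagon, m68k, metag, ...) share one rationale: the flag only ever influenced retry behaviour above the costly allocation order, and all of these are order-0 page-table allocations where it was a no-op. A small check of the threshold logic (PAGE_ALLOC_COSTLY_ORDER is 3 in this kernel):

    #include <stdio.h>

    #define PAGE_ALLOC_COSTLY_ORDER 3   /* kernel's threshold */

    int main(void)
    {
        int order;

        for (order = 0; order <= 4; order++)
            printf("order %d: __GFP_REPEAT %s\n", order,
                   order > PAGE_ALLOC_COSTLY_ORDER ?
                   "changes retry behaviour" : "is a no-op");
        return 0;
    }
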
diff --git a/arch/cris/include/asm/pgalloc.h b/arch/cris/include/asm/pgalloc.h
index 235ece437ddd..42f1affb9c2d 100644
--- a/arch/cris/include/asm/pgalloc.h
+++ b/arch/cris/include/asm/pgalloc.h
@@ -24,14 +24,14 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
24 24
25static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) 25static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
26{ 26{
27 pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); 27 pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
28 return pte; 28 return pte;
29} 29}
30 30
31static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) 31static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
32{ 32{
33 struct page *pte; 33 struct page *pte;
34 pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0); 34 pte = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
35 if (!pte) 35 if (!pte)
36 return NULL; 36 return NULL;
37 if (!pgtable_page_ctor(pte)) { 37 if (!pgtable_page_ctor(pte)) {
diff --git a/arch/frv/mm/pgalloc.c b/arch/frv/mm/pgalloc.c
index 41907d25ed38..c9ed14f6c67d 100644
--- a/arch/frv/mm/pgalloc.c
+++ b/arch/frv/mm/pgalloc.c
@@ -22,7 +22,7 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((aligned(PAGE_SIZE)));
22 22
23pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) 23pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
24{ 24{
25 pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT); 25 pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL);
26 if (pte) 26 if (pte)
27 clear_page(pte); 27 clear_page(pte);
28 return pte; 28 return pte;
@@ -33,9 +33,9 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
33 struct page *page; 33 struct page *page;
34 34
35#ifdef CONFIG_HIGHPTE 35#ifdef CONFIG_HIGHPTE
36 page = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0); 36 page = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM, 0);
37#else 37#else
38 page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0); 38 page = alloc_pages(GFP_KERNEL, 0);
39#endif 39#endif
40 if (!page) 40 if (!page)
41 return NULL; 41 return NULL;
diff --git a/arch/hexagon/include/asm/pgalloc.h b/arch/hexagon/include/asm/pgalloc.h
index 77da3b0ae3c2..eeebf862c46c 100644
--- a/arch/hexagon/include/asm/pgalloc.h
+++ b/arch/hexagon/include/asm/pgalloc.h
@@ -64,7 +64,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
64{ 64{
65 struct page *pte; 65 struct page *pte;
66 66
67 pte = alloc_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO); 67 pte = alloc_page(GFP_KERNEL | __GFP_ZERO);
68 if (!pte) 68 if (!pte)
69 return NULL; 69 return NULL;
70 if (!pgtable_page_ctor(pte)) { 70 if (!pgtable_page_ctor(pte)) {
@@ -78,7 +78,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
78static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 78static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
79 unsigned long address) 79 unsigned long address)
80{ 80{
81 gfp_t flags = GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO; 81 gfp_t flags = GFP_KERNEL | __GFP_ZERO;
82 return (pte_t *) __get_free_page(flags); 82 return (pte_t *) __get_free_page(flags);
83} 83}
84 84
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index f80758cb7157..e109ee95e919 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -45,7 +45,7 @@ config IA64
45 select GENERIC_SMP_IDLE_THREAD 45 select GENERIC_SMP_IDLE_THREAD
46 select ARCH_INIT_TASK 46 select ARCH_INIT_TASK
47 select ARCH_TASK_STRUCT_ALLOCATOR 47 select ARCH_TASK_STRUCT_ALLOCATOR
48 select ARCH_THREAD_INFO_ALLOCATOR 48 select ARCH_THREAD_STACK_ALLOCATOR
49 select ARCH_CLOCKSOURCE_DATA 49 select ARCH_CLOCKSOURCE_DATA
50 select GENERIC_TIME_VSYSCALL_OLD 50 select GENERIC_TIME_VSYSCALL_OLD
51 select SYSCTL_ARCH_UNALIGN_NO_WARN 51 select SYSCTL_ARCH_UNALIGN_NO_WARN
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index aa995b67c3f5..d1212b84fb83 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -48,15 +48,15 @@ struct thread_info {
48#ifndef ASM_OFFSETS_C 48#ifndef ASM_OFFSETS_C
49/* how to get the thread information struct from C */ 49/* how to get the thread information struct from C */
50#define current_thread_info() ((struct thread_info *) ((char *) current + IA64_TASK_SIZE)) 50#define current_thread_info() ((struct thread_info *) ((char *) current + IA64_TASK_SIZE))
51#define alloc_thread_info_node(tsk, node) \ 51#define alloc_thread_stack_node(tsk, node) \
52 ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE)) 52 ((unsigned long *) ((char *) (tsk) + IA64_TASK_SIZE))
53#define task_thread_info(tsk) ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE)) 53#define task_thread_info(tsk) ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
54#else 54#else
55#define current_thread_info() ((struct thread_info *) 0) 55#define current_thread_info() ((struct thread_info *) 0)
56#define alloc_thread_info_node(tsk, node) ((struct thread_info *) 0) 56#define alloc_thread_stack_node(tsk, node) ((unsigned long *) 0)
57#define task_thread_info(tsk) ((struct thread_info *) 0) 57#define task_thread_info(tsk) ((struct thread_info *) 0)
58#endif 58#endif
59#define free_thread_info(ti) /* nothing */ 59#define free_thread_stack(ti) /* nothing */
60#define task_stack_page(tsk) ((void *)(tsk)) 60#define task_stack_page(tsk) ((void *)(tsk))
61 61
62#define __HAVE_THREAD_FUNCTIONS 62#define __HAVE_THREAD_FUNCTIONS
diff --git a/arch/ia64/kernel/init_task.c b/arch/ia64/kernel/init_task.c
index f9efe9739d3f..0eaa89f3defd 100644
--- a/arch/ia64/kernel/init_task.c
+++ b/arch/ia64/kernel/init_task.c
@@ -26,6 +26,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
26 * handled. This is done by having a special ".data..init_task" section... 26 * handled. This is done by having a special ".data..init_task" section...
27 */ 27 */
28#define init_thread_info init_task_mem.s.thread_info 28#define init_thread_info init_task_mem.s.thread_info
29#define init_stack init_task_mem.stack
29 30
30union { 31union {
31 struct { 32 struct {
diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h
index f9924fbcfe42..fb95aed5f428 100644
--- a/arch/m68k/include/asm/mcf_pgalloc.h
+++ b/arch/m68k/include/asm/mcf_pgalloc.h
@@ -14,7 +14,7 @@ extern const char bad_pmd_string[];
14extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 14extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
15 unsigned long address) 15 unsigned long address)
16{ 16{
17 unsigned long page = __get_free_page(GFP_DMA|__GFP_REPEAT); 17 unsigned long page = __get_free_page(GFP_DMA);
18 18
19 if (!page) 19 if (!page)
20 return NULL; 20 return NULL;
@@ -51,7 +51,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
 static inline struct page *pte_alloc_one(struct mm_struct *mm,
 	unsigned long address)
 {
-	struct page *page = alloc_pages(GFP_DMA|__GFP_REPEAT, 0);
+	struct page *page = alloc_pages(GFP_DMA, 0);
 	pte_t *pte;
 
 	if (!page)
diff --git a/arch/m68k/include/asm/motorola_pgalloc.h b/arch/m68k/include/asm/motorola_pgalloc.h
index 24bcba496c75..c895b987202c 100644
--- a/arch/m68k/include/asm/motorola_pgalloc.h
+++ b/arch/m68k/include/asm/motorola_pgalloc.h
@@ -11,7 +11,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long ad
 {
 	pte_t *pte;
 
-	pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+	pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
 	if (pte) {
 		__flush_page_to_ram(pte);
 		flush_tlb_kernel_page(pte);
@@ -32,7 +32,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addres
 	struct page *page;
 	pte_t *pte;
 
-	page = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+	page = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
 	if(!page)
 		return NULL;
 	if (!pgtable_page_ctor(page)) {
diff --git a/arch/m68k/include/asm/sun3_pgalloc.h b/arch/m68k/include/asm/sun3_pgalloc.h
index 0931388de47f..1901f61f926f 100644
--- a/arch/m68k/include/asm/sun3_pgalloc.h
+++ b/arch/m68k/include/asm/sun3_pgalloc.h
@@ -37,7 +37,7 @@ do { \
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 					  unsigned long address)
 {
-	unsigned long page = __get_free_page(GFP_KERNEL|__GFP_REPEAT);
+	unsigned long page = __get_free_page(GFP_KERNEL);
 
 	if (!page)
 		return NULL;
@@ -49,7 +49,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 					unsigned long address)
 {
-	struct page *page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+	struct page *page = alloc_pages(GFP_KERNEL, 0);
 
 	if (page == NULL)
 		return NULL;
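The __GFP_REPEAT removals here, and in the metag, microblaze, MIPS, mn10300, nios2, openrisc, parisc, and powerpc hunks below, all target order-0 page-table allocations. The flag only changes retry behaviour for costly higher-order requests, so for a single page it was a no-op and dropping it should be behaviour-neutral. A kernel-style sketch of the resulting idiom (illustrative, not taken from this diff):

#include <linux/gfp.h>

/* A zeroed order-0 page for a page table needs no retry flag:
 * GFP_KERNEL already retries hard enough at this order. */
static pte_t *pte_alloc_one_kernel_sketch(struct mm_struct *mm,
					  unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
}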
diff --git a/arch/metag/include/asm/pgalloc.h b/arch/metag/include/asm/pgalloc.h
index 3104df0a4822..c2caa1ee4360 100644
--- a/arch/metag/include/asm/pgalloc.h
+++ b/arch/metag/include/asm/pgalloc.h
@@ -42,8 +42,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 					  unsigned long address)
 {
-	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT |
-					      __GFP_ZERO);
+	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
 	return pte;
 }
 
@@ -51,7 +50,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 				      unsigned long address)
 {
 	struct page *pte;
-	pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0);
+	pte = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
 	if (!pte)
 		return NULL;
 	if (!pgtable_page_ctor(pte)) {
diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h
index 61436d69775c..7c89390c0c13 100644
--- a/arch/microblaze/include/asm/pgalloc.h
+++ b/arch/microblaze/include/asm/pgalloc.h
@@ -116,9 +116,9 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
 	struct page *ptepage;
 
 #ifdef CONFIG_HIGHPTE
-	int flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
+	int flags = GFP_KERNEL | __GFP_HIGHMEM;
 #else
-	int flags = GFP_KERNEL | __GFP_REPEAT;
+	int flags = GFP_KERNEL;
 #endif
 
 	ptepage = alloc_pages(flags, 0);
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c
index 4f4520e779a5..eb99fcc76088 100644
--- a/arch/microblaze/mm/pgtable.c
+++ b/arch/microblaze/mm/pgtable.c
@@ -239,8 +239,7 @@ __init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 {
 	pte_t *pte;
 	if (mem_init_done) {
-		pte = (pte_t *)__get_free_page(GFP_KERNEL |
-					__GFP_REPEAT | __GFP_ZERO);
+		pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
 	} else {
 		pte = (pte_t *)early_get_page();
 		if (pte)
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 6733ac575da4..36a391d289aa 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -74,7 +74,7 @@
 #define KVM_GUEST_KUSEG			0x00000000UL
 #define KVM_GUEST_KSEG0			0x40000000UL
 #define KVM_GUEST_KSEG23		0x60000000UL
-#define KVM_GUEST_KSEGX(a)		((_ACAST32_(a)) & 0x60000000)
+#define KVM_GUEST_KSEGX(a)		((_ACAST32_(a)) & 0xe0000000)
 #define KVM_GUEST_CPHYSADDR(a)		((_ACAST32_(a)) & 0x1fffffff)
 
 #define KVM_GUEST_CKSEG0ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
@@ -338,6 +338,7 @@ struct kvm_mips_tlb {
 #define KVM_MIPS_GUEST_TLB_SIZE	64
 struct kvm_vcpu_arch {
 	void *host_ebase, *guest_ebase;
+	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
 	unsigned long host_stack;
 	unsigned long host_gp;
 
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
index b336037e8768..93c079a1cfc8 100644
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -69,7 +69,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 {
 	pte_t *pte;
 
-	pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, PTE_ORDER);
+	pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, PTE_ORDER);
 
 	return pte;
 }
@@ -79,7 +79,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
 {
 	struct page *pte;
 
-	pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
+	pte = alloc_pages(GFP_KERNEL, PTE_ORDER);
 	if (!pte)
 		return NULL;
 	clear_highpage(pte);
@@ -113,7 +113,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
 	pmd_t *pmd;
 
-	pmd = (pmd_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, PMD_ORDER);
+	pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ORDER);
 	if (pmd)
 		pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
 	return pmd;
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 396df6eb0a12..645c8a1982a7 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -1636,6 +1636,7 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
 	if (index < 0) {
 		vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
 		vcpu->arch.host_cp0_badvaddr = va;
+		vcpu->arch.pc = curr_pc;
 		er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
 						 vcpu);
 		preempt_enable();
@@ -1647,6 +1648,8 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
 		 * invalid exception to the guest
 		 */
 		if (!TLB_IS_VALID(*tlb, va)) {
+			vcpu->arch.host_cp0_badvaddr = va;
+			vcpu->arch.pc = curr_pc;
 			er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
 							run, vcpu);
 			preempt_enable();
@@ -1666,7 +1669,7 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
 			cache, op, base, arch->gprs[base], offset);
 		er = EMULATE_FAIL;
 		preempt_enable();
-		goto dont_update_pc;
+		goto done;
 
 	}
 
@@ -1694,16 +1697,20 @@ skip_fault:
 	kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
 		cache, op, base, arch->gprs[base], offset);
 	er = EMULATE_FAIL;
-	preempt_enable();
-	goto dont_update_pc;
 	}
 
 	preempt_enable();
+done:
+	/* Rollback PC only if emulation was unsuccessful */
+	if (er == EMULATE_FAIL)
+		vcpu->arch.pc = curr_pc;
 
 dont_update_pc:
-	/* Rollback PC */
-	vcpu->arch.pc = curr_pc;
-done:
+	/*
+	 * This is for exceptions whose emulation updates the PC, so do not
+	 * overwrite the PC under any circumstances
+	 */
+
 	return er;
 }
 
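Taken together, the emulate.c hunks move PC restoration from an unconditional epilogue to a failure-only path: curr_pc is snapshotted before emulation, the exception-injection paths now install it themselves before delivering the fault, and the shared exit restores it only on EMULATE_FAIL. A self-contained sketch of that pattern, with hypothetical types rather than real KVM structures:

enum emu_result { EMU_DONE, EMU_FAIL };

struct vcpu_sketch { unsigned long pc; };

static enum emu_result emulate_with_rollback(struct vcpu_sketch *vcpu,
		enum emu_result (*emulate)(struct vcpu_sketch *))
{
	unsigned long curr_pc = vcpu->pc;	/* snapshot before emulating */
	enum emu_result er = emulate(vcpu);

	if (er == EMU_FAIL)
		vcpu->pc = curr_pc;	/* roll back only on failure; success
					   keeps any PC the emulation set */
	return er;
}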
diff --git a/arch/mips/kvm/interrupt.h b/arch/mips/kvm/interrupt.h
index 4ab4bdfad703..2143884709e4 100644
--- a/arch/mips/kvm/interrupt.h
+++ b/arch/mips/kvm/interrupt.h
@@ -28,6 +28,7 @@
 #define MIPS_EXC_MAX                12
 /* XXXSL More to follow */
 
+extern char __kvm_mips_vcpu_run_end[];
 extern char mips32_exception[], mips32_exceptionEnd[];
 extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
 
diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S
index 3ef03009de5f..828fcfc1cd7f 100644
--- a/arch/mips/kvm/locore.S
+++ b/arch/mips/kvm/locore.S
@@ -202,6 +202,7 @@ FEXPORT(__kvm_mips_load_k0k1)
 
 	/* Jump to guest */
 	eret
+EXPORT(__kvm_mips_vcpu_run_end)
 
 VECTOR(MIPSX(exception), unknown)
 /* Find out what mode we came from and jump to the proper handler. */
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index dc052fb5c7a2..44da5259f390 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -315,6 +315,15 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 	memcpy(gebase + offset, mips32_GuestException,
 	       mips32_GuestExceptionEnd - mips32_GuestException);
 
+#ifdef MODULE
+	offset += mips32_GuestExceptionEnd - mips32_GuestException;
+	memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run,
+	       __kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run);
+	vcpu->arch.vcpu_run = gebase + offset;
+#else
+	vcpu->arch.vcpu_run = __kvm_mips_vcpu_run;
+#endif
+
 	/* Invalidate the icache for these ranges */
 	local_flush_icache_range((unsigned long)gebase,
 				(unsigned long)gebase + ALIGN(size, PAGE_SIZE));
@@ -404,7 +413,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	/* Disable hardware page table walking while in guest */
 	htw_stop();
 
-	r = __kvm_mips_vcpu_run(run, vcpu);
+	r = vcpu->arch.vcpu_run(run, vcpu);
 
 	/* Re-enable HTW before enabling interrupts */
 	htw_start();
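The mips.c change explains the new vcpu_run hook and the __kvm_mips_vcpu_run_end marker added above: when KVM is built as a module its text lives in mapped kernel space that cannot be reached once the guest context is loaded, so the run stub is copied into the directly-reachable gebase area next to the exception vectors and invoked through the pointer. Bracketing a routine with start/end symbols to measure and relocate it is the reusable trick; a hedged sketch with hypothetical symbol names:

#include <string.h>

extern char my_stub[], my_stub_end[];	/* labels bracketing the stub text */

/* Copy position-independent stub text somewhere directly reachable
 * and return the new entry point to call through. */
static void *relocate_stub(void *dst)
{
	memcpy(dst, my_stub, my_stub_end - my_stub);
	return dst;
}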
diff --git a/arch/mn10300/include/asm/thread_info.h b/arch/mn10300/include/asm/thread_info.h
index 4861a78c7160..f5f90bbf019d 100644
--- a/arch/mn10300/include/asm/thread_info.h
+++ b/arch/mn10300/include/asm/thread_info.h
@@ -115,7 +115,7 @@ static inline unsigned long current_stack_pointer(void)
 }
 
 #ifndef CONFIG_KGDB
-void arch_release_thread_info(struct thread_info *ti);
+void arch_release_thread_stack(unsigned long *stack);
 #endif
 #define get_thread_info(ti)	get_task_struct((ti)->task)
 #define put_thread_info(ti)	put_task_struct((ti)->task)
diff --git a/arch/mn10300/kernel/kgdb.c b/arch/mn10300/kernel/kgdb.c
index 99770823451a..2d7986c386fe 100644
--- a/arch/mn10300/kernel/kgdb.c
+++ b/arch/mn10300/kernel/kgdb.c
@@ -397,8 +397,9 @@ static bool kgdb_arch_undo_singlestep(struct pt_regs *regs)
  * single-step state is cleared. At this point the breakpoints should have
  * been removed by __switch_to().
  */
-void arch_release_thread_info(struct thread_info *ti)
+void arch_release_thread_stack(unsigned long *stack)
 {
+	struct thread_info *ti = (void *)stack;
 	if (kgdb_sstep_thread == ti) {
 		kgdb_sstep_thread = NULL;
 
diff --git a/arch/mn10300/mm/pgtable.c b/arch/mn10300/mm/pgtable.c
index e77a7c728081..9577cf768875 100644
--- a/arch/mn10300/mm/pgtable.c
+++ b/arch/mn10300/mm/pgtable.c
@@ -63,7 +63,7 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
 
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL);
 	if (pte)
 		clear_page(pte);
 	return pte;
@@ -74,9 +74,9 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 	struct page *pte;
 
 #ifdef CONFIG_HIGHPTE
-	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
+	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM, 0);
 #else
-	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+	pte = alloc_pages(GFP_KERNEL, 0);
 #endif
 	if (!pte)
 		return NULL;
diff --git a/arch/nios2/include/asm/pgalloc.h b/arch/nios2/include/asm/pgalloc.h
index 6e2985e0a7b9..bb47d08c8ef7 100644
--- a/arch/nios2/include/asm/pgalloc.h
+++ b/arch/nios2/include/asm/pgalloc.h
@@ -42,8 +42,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 {
 	pte_t *pte;
 
-	pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO,
-					 PTE_ORDER);
+	pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, PTE_ORDER);
 
 	return pte;
 }
@@ -53,7 +52,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 {
 	struct page *pte;
 
-	pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
+	pte = alloc_pages(GFP_KERNEL, PTE_ORDER);
 	if (pte) {
 		if (!pgtable_page_ctor(pte)) {
 			__free_page(pte);
diff --git a/arch/openrisc/include/asm/pgalloc.h b/arch/openrisc/include/asm/pgalloc.h
index 21484e5b9e9a..87eebd185089 100644
--- a/arch/openrisc/include/asm/pgalloc.h
+++ b/arch/openrisc/include/asm/pgalloc.h
@@ -77,7 +77,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
 					 unsigned long address)
 {
 	struct page *pte;
-	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+	pte = alloc_pages(GFP_KERNEL, 0);
 	if (!pte)
 		return NULL;
 	clear_page(page_address(pte));
diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c
index 62b08ef392be..5b2a95116e8f 100644
--- a/arch/openrisc/mm/ioremap.c
+++ b/arch/openrisc/mm/ioremap.c
@@ -122,7 +122,7 @@ pte_t __init_refok *pte_alloc_one_kernel(struct mm_struct *mm,
 	pte_t *pte;
 
 	if (likely(mem_init_done)) {
-		pte = (pte_t *) __get_free_page(GFP_KERNEL | __GFP_REPEAT);
+		pte = (pte_t *) __get_free_page(GFP_KERNEL);
 	} else {
 		pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
 #if 0
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
index f2fd327dce2e..f08dda3f0995 100644
--- a/arch/parisc/include/asm/pgalloc.h
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -63,8 +63,7 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-	pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
-					       PMD_ORDER);
+	pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL, PMD_ORDER);
 	if (pmd)
 		memset(pmd, 0, PAGE_SIZE<<PMD_ORDER);
 	return pmd;
@@ -124,7 +123,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
 static inline pgtable_t
 pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-	struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+	struct page *page = alloc_page(GFP_KERNEL|__GFP_ZERO);
 	if (!page)
 		return NULL;
 	if (!pgtable_page_ctor(page)) {
@@ -137,7 +136,7 @@ pte_alloc_one(struct mm_struct *mm, unsigned long address)
 static inline pte_t *
 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
 {
-	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
 	return pte;
 }
 
diff --git a/arch/parisc/include/asm/traps.h b/arch/parisc/include/asm/traps.h
index 4736020ba5ea..5e953ab4530d 100644
--- a/arch/parisc/include/asm/traps.h
+++ b/arch/parisc/include/asm/traps.h
@@ -8,6 +8,8 @@ struct pt_regs;
 void parisc_terminate(char *msg, struct pt_regs *regs,
 		int code, unsigned long offset) __noreturn __cold;
 
+void die_if_kernel(char *str, struct pt_regs *regs, long err);
+
 /* mm/fault.c */
 void do_page_fault(struct pt_regs *regs, unsigned long code,
 		unsigned long address);
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index e81ccf1716e9..5adc339eb7c8 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -324,8 +324,9 @@ int init_per_cpu(int cpunum)
 	per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
 	per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;
 
-	printk(KERN_INFO  "FP[%d] enabled: Rev %ld Model %ld\n",
-	       cpunum, coproc_cfg.revision, coproc_cfg.model);
+	if (cpunum == 0)
+		printk(KERN_INFO  "FP[%d] enabled: Rev %ld Model %ld\n",
+		       cpunum, coproc_cfg.revision, coproc_cfg.model);
 
 	/*
 	** store status register to stack (hopefully aligned)
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 58dd6801f5be..31ec99a5f119 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -309,11 +309,6 @@ void __init time_init(void)
 	clocks_calc_mult_shift(&cyc2ns_mul, &cyc2ns_shift, current_cr16_khz,
 				NSEC_PER_MSEC, 0);
 
-#if defined(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK) && defined(CONFIG_64BIT)
-	/* At bootup only one 64bit CPU is online and cr16 is "stable" */
-	set_sched_clock_stable();
-#endif
-
 	start_cpu_itimer();	/* get CPU 0 started */
 
 	/* register at clocksource framework */
diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
index d7c0acb35ec2..2b65c0177778 100644
--- a/arch/parisc/kernel/unaligned.c
+++ b/arch/parisc/kernel/unaligned.c
@@ -28,6 +28,7 @@
 #include <linux/ratelimit.h>
 #include <asm/uaccess.h>
 #include <asm/hardirq.h>
+#include <asm/traps.h>
 
 /* #define DEBUG_UNALIGNED 1 */
 
@@ -130,8 +131,6 @@
 
 int unaligned_enabled __read_mostly = 1;
 
-void die_if_kernel (char *str, struct pt_regs *regs, long err);
-
 static int emulate_ldh(struct pt_regs *regs, int toreg)
 {
 	unsigned long saddr = regs->ior;
@@ -666,7 +665,7 @@ void handle_unaligned(struct pt_regs *regs)
 		break;
 	}
 
-	if (modify && R1(regs->iir))
+	if (ret == 0 && modify && R1(regs->iir))
 		regs->gr[R1(regs->iir)] = newbase;
 
 
@@ -677,6 +676,14 @@ void handle_unaligned(struct pt_regs *regs)
 
 	if (ret)
 	{
+		/*
+		 * The unaligned handler failed.
+		 * If we were called by __get_user() or __put_user() jump
+		 * to its exception fixup handler instead of crashing.
+		 */
+		if (!user_mode(regs) && fixup_exception(regs))
+			return;
+
 		printk(KERN_CRIT "Unaligned handler failed, ret = %d\n", ret);
 		die_if_kernel("Unaligned data reference", regs, 28);
 
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index ddd988b267a9..e278a87f43cc 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -75,7 +75,10 @@ find_unwind_entry(unsigned long addr)
 	if (addr >= kernel_unwind_table.start &&
 	    addr <= kernel_unwind_table.end)
 		e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
-	else
+	else {
+		unsigned long flags;
+
+		spin_lock_irqsave(&unwind_lock, flags);
 		list_for_each_entry(table, &unwind_tables, list) {
 			if (addr >= table->start &&
 			    addr <= table->end)
@@ -86,6 +89,8 @@ find_unwind_entry(unsigned long addr)
 				break;
 			}
 		}
+		spin_unlock_irqrestore(&unwind_lock, flags);
+	}
 
 	return e;
 }
@@ -303,18 +308,16 @@ static void unwind_frame_regs(struct unwind_frame_info *info)
 
 		insn = *(unsigned int *)npc;
 
-		if ((insn & 0xffffc000) == 0x37de0000 ||
-		    (insn & 0xffe00000) == 0x6fc00000) {
+		if ((insn & 0xffffc001) == 0x37de0000 ||
+		    (insn & 0xffe00001) == 0x6fc00000) {
 			/* ldo X(sp), sp, or stwm X,D(sp) */
-			frame_size += (insn & 0x1 ? -1 << 13 : 0) |
-				((insn & 0x3fff) >> 1);
+			frame_size += (insn & 0x3fff) >> 1;
 			dbg("analyzing func @ %lx, insn=%08x @ "
 			    "%lx, frame_size = %ld\n", info->ip,
 			    insn, npc, frame_size);
-		} else if ((insn & 0xffe00008) == 0x73c00008) {
+		} else if ((insn & 0xffe00009) == 0x73c00008) {
 			/* std,ma X,D(sp) */
-			frame_size += (insn & 0x1 ? -1 << 13 : 0) |
-				(((insn >> 4) & 0x3ff) << 3);
+			frame_size += ((insn >> 4) & 0x3ff) << 3;
 			dbg("analyzing func @ %lx, insn=%08x @ "
 			    "%lx, frame_size = %ld\n", info->ip,
 			    insn, npc, frame_size);
@@ -333,6 +336,9 @@ static void unwind_frame_regs(struct unwind_frame_info *info)
 			}
 		}
 
+		if (frame_size > e->Total_frame_size << 3)
+			frame_size = e->Total_frame_size << 3;
+
 		if (!unwind_special(info, e->region_start, frame_size)) {
 			info->prev_sp = info->sp - frame_size;
 			if (e->Millicode)
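The tightened masks fold the displacement's sign check into the instruction match: PA-RISC low-sign immediates keep the sign in bit 0 (visible in the dropped "-1 << 13" term), so requiring that bit to be 0 admits only positive stack adjustments, and the new clamp against Total_frame_size bounds the result. A small decode sketch, assuming the caller has already matched the opcode with the stricter mask:

#include <stdint.h>

/* Positive 14-bit low-sign displacement of "ldo X(sp),sp":
 * bit 0 is the sign and is known to be 0 here, so the magnitude
 * in bits 1-13 needs no sign extension. */
static long ldo_sp_disp(uint32_t insn)
{
	return (insn & 0x3fff) >> 1;
}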
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 01f7464d9fea..0a9d439bcda6 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -128,7 +128,7 @@ config PPC
 	select IRQ_FORCED_THREADING
 	select HAVE_RCU_TABLE_FREE if SMP
 	select HAVE_SYSCALL_TRACEPOINTS
-	select HAVE_CBPF_JIT
+	select HAVE_CBPF_JIT			if CPU_BIG_ENDIAN
 	select HAVE_ARCH_JUMP_LABEL
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select ARCH_HAS_GCOV_PROFILE_ALL
diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h
index a2350194fc76..8e21bb492dca 100644
--- a/arch/powerpc/include/asm/book3s/32/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h
@@ -102,7 +102,6 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
 				  unsigned long address)
 {
-	tlb_flush_pgtable(tlb, address);
 	pgtable_page_dtor(table);
 	pgtable_free_tlb(tlb, page_address(table), 0);
 }
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index 290157e8d5b2..74839f24f412 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -88,6 +88,7 @@
 #define HPTE_R_RPN_SHIFT	12
 #define HPTE_R_RPN		ASM_CONST(0x0ffffffffffff000)
 #define HPTE_R_PP		ASM_CONST(0x0000000000000003)
+#define HPTE_R_PPP		ASM_CONST(0x8000000000000003)
 #define HPTE_R_N		ASM_CONST(0x0000000000000004)
 #define HPTE_R_G		ASM_CONST(0x0000000000000008)
 #define HPTE_R_M		ASM_CONST(0x0000000000000010)
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index 488279edb1f0..cd5e7aa8cc34 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -41,7 +41,7 @@ extern struct kmem_cache *pgtable_cache[];
 			pgtable_cache[(shift) - 1];	\
 		})
 
-#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO
+#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO
 
 extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int);
 extern void pte_fragment_free(unsigned long *, int);
@@ -56,7 +56,7 @@ static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
 	return (pgd_t *)__get_free_page(PGALLOC_GFP);
 #else
 	struct page *page;
-	page = alloc_pages(PGALLOC_GFP, 4);
+	page = alloc_pages(PGALLOC_GFP | __GFP_REPEAT, 4);
 	if (!page)
 		return NULL;
 	return (pgd_t *) page_address(page);
@@ -93,8 +93,7 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
-				GFP_KERNEL|__GFP_REPEAT);
+	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL);
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -110,13 +109,17 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 				  unsigned long address)
 {
+	/*
+	 * By now all the pud entries should be none entries. So go
+	 * ahead and flush the page walk cache
+	 */
+	flush_tlb_pgtable(tlb, address);
 	pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE);
 }
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
-				GFP_KERNEL|__GFP_REPEAT);
+	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), GFP_KERNEL);
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
@@ -127,6 +130,11 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
 				  unsigned long address)
 {
+	/*
+	 * By now all the pud entries should be none entries. So go
+	 * ahead and flush the page walk cache
+	 */
+	flush_tlb_pgtable(tlb, address);
 	return pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX);
 }
 
@@ -151,7 +159,7 @@ static inline pgtable_t pmd_pgtable(pmd_t pmd)
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 					  unsigned long address)
 {
-	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
@@ -198,7 +206,11 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
 				  unsigned long address)
 {
-	tlb_flush_pgtable(tlb, address);
+	/*
+	 * By now all the pud entries should be none entries. So go
+	 * ahead and flush the page walk cache
+	 */
+	flush_tlb_pgtable(tlb, address);
 	pgtable_free_tlb(tlb, table, 0);
 }
 
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index 937d4e247ac3..df294224e280 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -228,5 +228,20 @@ extern void radix__vmemmap_remove_mapping(unsigned long start,
 
 extern int radix__map_kernel_page(unsigned long ea, unsigned long pa,
 				 pgprot_t flags, unsigned int psz);
+
+static inline unsigned long radix__get_tree_size(void)
+{
+	unsigned long rts_field;
+	/*
+	 * we support 52 bits, hence 52-31 = 21, 0b10101
+	 * RTS encoding details
+	 * bits 0 - 3 of rts -> bits 6 - 8 unsigned long
+	 * bits 4 - 5 of rts -> bits 62 - 63 of unsigned long
+	 */
+	rts_field = (0x5UL << 5); /* 6 - 8 bits */
+	rts_field |= (0x2UL << 61);
+
+	return rts_field;
+}
 #endif /* __ASSEMBLY__ */
 #endif
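radix__get_tree_size() centralises the split RTS encoding that the call sites below (mmu_context_book3s64.c and pgtable-radix.c) previously open-coded, and corrects the value: a 52-bit radix tree needs RTS = 52 - 31 = 21, not the 24 implied by the old "3ull << PPC_BITLSHIFT(2)". A standalone check that the two shifted fields reproduce 0b10101:

#include <assert.h>

int main(void)
{
	unsigned long rts = 21;	/* 52 - 31, i.e. 0b10101 */
	unsigned long rts_field =
		((rts & 0x7UL) << 5) |		/* low 3 bits -> bits 5-7 */
		(((rts >> 3) & 0x3UL) << 61);	/* high 2 bits -> bits 61-62 */

	assert(rts_field == ((0x5UL << 5) | (0x2UL << 61)));
	return 0;
}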
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index 13ef38828dfe..3fa94fcac628 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -18,16 +18,19 @@ extern void radix__local_flush_tlb_mm(struct mm_struct *mm);
 extern void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 extern void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
 					 unsigned long ap, int nid);
+extern void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
 extern void radix__tlb_flush(struct mmu_gather *tlb);
 #ifdef CONFIG_SMP
 extern void radix__flush_tlb_mm(struct mm_struct *mm);
 extern void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 extern void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
 				   unsigned long ap, int nid);
+extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
 #else
 #define radix__flush_tlb_mm(mm)		radix__local_flush_tlb_mm(mm)
 #define radix__flush_tlb_page(vma,addr)	radix__local_flush_tlb_page(vma,addr)
 #define radix___flush_tlb_page(mm,addr,p,i)	radix___local_flush_tlb_page(mm,addr,p,i)
+#define radix__flush_tlb_pwc(tlb, addr)	radix__local_flush_tlb_pwc(tlb, addr)
 #endif
 
 #endif
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
index d98424ae356c..96e5769b18b0 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
@@ -72,5 +72,19 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
 #define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
 #define flush_tlb_page(vma, addr)	local_flush_tlb_page(vma, addr)
 #endif /* CONFIG_SMP */
+/*
+ * flush the page walk cache for the address
+ */
+static inline void flush_tlb_pgtable(struct mmu_gather *tlb, unsigned long address)
+{
+	/*
+	 * Flush the page table walk cache on freeing a page table. We already
+	 * have marked the upper/higher level page table entry none by now.
+	 * So it is safe to flush PWC here.
+	 */
+	if (!radix_enabled())
+		return;
 
+	radix__flush_tlb_pwc(tlb, address);
+}
 #endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */
diff --git a/arch/powerpc/include/asm/book3s/pgalloc.h b/arch/powerpc/include/asm/book3s/pgalloc.h
index 54f591e9572e..c0a69ae92256 100644
--- a/arch/powerpc/include/asm/book3s/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/pgalloc.h
@@ -4,11 +4,6 @@
 #include <linux/mm.h>
 
 extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
-static inline void tlb_flush_pgtable(struct mmu_gather *tlb,
-				     unsigned long address)
-{
-
-}
 
 #ifdef CONFIG_PPC64
 #include <asm/book3s/64/pgalloc.h>
diff --git a/arch/powerpc/include/asm/nohash/64/pgalloc.h b/arch/powerpc/include/asm/nohash/64/pgalloc.h
index 0c12a3bfe2ab..897d2e1c8a9b 100644
--- a/arch/powerpc/include/asm/nohash/64/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/64/pgalloc.h
@@ -57,8 +57,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
-				GFP_KERNEL|__GFP_REPEAT);
+	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL);
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -88,7 +87,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 					  unsigned long address)
 {
-	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
@@ -172,7 +171,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 
 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
-	pte_fragment_fre((unsigned long *)pte, 1);
+	pte_fragment_free((unsigned long *)pte, 1);
 }
 
 static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
@@ -190,8 +189,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
-				GFP_KERNEL|__GFP_REPEAT);
+	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), GFP_KERNEL);
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index c1e82e968506..a0948f40bc7b 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -717,7 +717,7 @@
 #define   MMCR0_FCWAIT	0x00000002UL /* freeze counter in WAIT state */
 #define   MMCR0_FCHV	0x00000001UL /* freeze conditions in hypervisor mode */
 #define SPRN_MMCR1	798
-#define SPRN_MMCR2	769
+#define SPRN_MMCR2	785
 #define SPRN_MMCRA	0x312
 #define   MMCRA_SDSYNC	0x80000000UL /* SDAR synced with SIAR */
 #define   MMCRA_SDAR_DCACHE_MISS 0x40000000UL
@@ -754,13 +754,13 @@
 #define SPRN_PMC6	792
 #define SPRN_PMC7	793
 #define SPRN_PMC8	794
-#define SPRN_SIAR	780
-#define SPRN_SDAR	781
 #define SPRN_SIER	784
 #define   SIER_SIPR		0x2000000	/* Sampled MSR_PR */
 #define   SIER_SIHV		0x1000000	/* Sampled MSR_HV */
 #define   SIER_SIAR_VALID	0x0400000	/* SIAR contents valid */
 #define   SIER_SDAR_VALID	0x0200000	/* SDAR contents valid */
+#define SPRN_SIAR	796
+#define SPRN_SDAR	797
 #define SPRN_TACR	888
 #define SPRN_TCSCR	889
 #define SPRN_CSIGR	890
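These constants were wrong because, on the affected cores, the PMU appears to expose two copies of each register: a problem-state read-only alias in the 768 range and the privileged register 16 SPRs higher. The old values named the aliases, which reads fine from userspace but breaks privileged writes. An illustration of the assumed pairing (the numbers come from the hunk; the alias naming is my gloss, not from this diff):

enum sprn_pairs_sketch {
	/* problem-state alias vs. privileged register */
	UMMCR2_SKETCH = 769,	MMCR2_SKETCH = 785,	/* 769 + 16 */
	USIAR_SKETCH  = 780,	SIAR_SKETCH  = 796,	/* 780 + 16 */
	USDAR_SKETCH  = 781,	SDAR_SKETCH  = 797,	/* 781 + 16 */
};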
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 2714a3b81d24..b5f73cb5eeb6 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -642,7 +642,6 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
 	if (pe->type & EEH_PE_VF) {
 		eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
 	} else {
-		eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
 		pci_lock_rescan_remove();
 		pci_hp_remove_devices(bus);
 		pci_unlock_rescan_remove();
@@ -692,10 +691,12 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
 		 */
 		edev = list_first_entry(&pe->edevs, struct eeh_dev, list);
 		eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
-		if (pe->type & EEH_PE_VF)
+		if (pe->type & EEH_PE_VF) {
 			eeh_add_virt_device(edev, NULL);
-		else
+		} else {
+			eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
 			pci_hp_add_devices(bus);
+		}
 	} else if (frozen_bus && rmv_data->removed) {
 		pr_info("EEH: Sleep 5s ahead of partial hotplug\n");
 		ssleep(5);
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 4c9440629128..8bcc1b457115 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1399,11 +1399,12 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_RADIX)
 	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
 
 	mtlr	r10
-BEGIN_MMU_FTR_SECTION
-	b	2f
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_RADIX)
 	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
+BEGIN_MMU_FTR_SECTION
 	beq-	2f
+FTR_SECTION_ELSE
+	b	2f
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_RADIX)
 
 .machine	push
 .machine	"power4"
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index da5192590c44..6ee4b72cda42 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -656,6 +656,7 @@ unsigned char ibm_architecture_vec[] = {
 	W(0xffff0000), W(0x003e0000),	/* POWER6 */
 	W(0xffff0000), W(0x003f0000),	/* POWER7 */
 	W(0xffff0000), W(0x004b0000),	/* POWER8E */
+	W(0xffff0000), W(0x004c0000),	/* POWER8NVL */
 	W(0xffff0000), W(0x004d0000),	/* POWER8 */
 	W(0xffffffff), W(0x0f000004),	/* all 2.07-compliant */
 	W(0xffffffff), W(0x0f000003),	/* all 2.06-compliant */
@@ -718,7 +719,7 @@ unsigned char ibm_architecture_vec[] = {
  * must match by the macro below. Update the definition if
  * the structure layout changes.
  */
-#define IBM_ARCH_VEC_NRCORES_OFFSET	125
+#define IBM_ARCH_VEC_NRCORES_OFFSET	133
 	W(NR_CPUS),			/* number of cores supported */
 	0,
 	0,
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 30a03c03fe73..060b140f03c6 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -377,7 +377,7 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
 
 #else
 	BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
-		     offsetof(struct thread_fp_state, fpr[32][0]));
+		     offsetof(struct thread_fp_state, fpr[32]));
 
 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 				   &target->thread.fp_state, 0, -1);
@@ -405,7 +405,7 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
 		return 0;
 #else
 	BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
-		     offsetof(struct thread_fp_state, fpr[32][0]));
+		     offsetof(struct thread_fp_state, fpr[32]));
 
 	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 				  &target->thread.fp_state, 0, -1);
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index d873f6507f72..f8a871a72985 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -316,8 +316,8 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 			DBG_LOW(" -> hit\n");
 			/* Update the HPTE */
 			hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
-						~(HPTE_R_PP | HPTE_R_N)) |
-					       (newpp & (HPTE_R_PP | HPTE_R_N |
+						~(HPTE_R_PPP | HPTE_R_N)) |
+					       (newpp & (HPTE_R_PPP | HPTE_R_N |
 							 HPTE_R_C)));
 		}
 	native_unlock_hpte(hptep);
@@ -385,8 +385,8 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
 
 	/* Update the HPTE */
 	hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
-				~(HPTE_R_PP | HPTE_R_N)) |
-			       (newpp & (HPTE_R_PP | HPTE_R_N)));
+				~(HPTE_R_PPP | HPTE_R_N)) |
+			       (newpp & (HPTE_R_PPP | HPTE_R_N)));
 	/*
 	 * Ensure it is out of the tlb too. Bolted entries base and
 	 * actual page size will be same.
@@ -550,7 +550,11 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
 		}
 	}
 	/* This works for all page sizes, and for 256M and 1T segments */
-	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
+	if (cpu_has_feature(CPU_FTR_ARCH_300))
+		*ssize = hpte_r >> HPTE_R_3_0_SSIZE_SHIFT;
+	else
+		*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
+
 	shift = mmu_psize_defs[size].shift;
 
 	avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 59268969a0bc..5b22ba0b58bc 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -159,6 +159,19 @@ static struct mmu_psize_def mmu_psize_defaults_gp[] = {
 	},
 };
 
+/*
+ * 'R' and 'C' update notes:
+ * - Under pHyp or KVM, the updatepp path will not set C, thus it *will*
+ *   create writeable HPTEs without C set, because the hcall H_PROTECT
+ *   that we use in that case will not update C
+ * - The above is however not a problem, because we also don't do that
+ *   fancy "no flush" variant of eviction and we use H_REMOVE which will
+ *   do the right thing and thus we don't have the race I described earlier
+ *
+ * - Under bare metal, we do have the race, so we need R and C set
+ * - We make sure R is always set and never lost
+ * - C is _PAGE_DIRTY, and *should* always be set for a writeable mapping
+ */
 unsigned long htab_convert_pte_flags(unsigned long pteflags)
 {
 	unsigned long rflags = 0;
@@ -186,19 +199,28 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
 		rflags |= 0x1;
 	}
 	/*
-	 * Always add "C" bit for perf. Memory coherence is always enabled
+	 * We can't allow hardware to update hpte bits. Hence always
+	 * set 'R' bit and set 'C' if it is a write fault
 	 */
-	rflags |= HPTE_R_C | HPTE_R_M;
+	rflags |= HPTE_R_R;
+
+	if (pteflags & _PAGE_DIRTY)
+		rflags |= HPTE_R_C;
 	/*
 	 * Add in WIG bits
 	 */
 
 	if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_TOLERANT)
 		rflags |= HPTE_R_I;
-	if ((pteflags & _PAGE_CACHE_CTL ) == _PAGE_NON_IDEMPOTENT)
+	else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT)
 		rflags |= (HPTE_R_I | HPTE_R_G);
-	if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO)
-		rflags |= (HPTE_R_I | HPTE_R_W);
+	else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO)
+		rflags |= (HPTE_R_W | HPTE_R_I | HPTE_R_M);
+	else
+		/*
+		 * Add memory coherence if cache inhibited is not set
+		 */
+		rflags |= HPTE_R_M;
 
 	return rflags;
 }
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 5aac1a3f86cd..119d18611500 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -73,7 +73,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 		cachep = PGT_CACHE(pdshift - pshift);
 #endif
 
-	new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT);
+	new = kmem_cache_zalloc(cachep, GFP_KERNEL);
 
 	BUG_ON(pshift > HUGEPD_SHIFT_MASK);
 	BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index 227b2a6c4544..196222227e82 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -65,7 +65,7 @@ static int radix__init_new_context(struct mm_struct *mm, int index)
 	/*
 	 * set the process table entry,
 	 */
-	rts_field = 3ull << PPC_BITLSHIFT(2);
+	rts_field = radix__get_tree_size();
 	process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);
 	return 0;
 }
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index eb4451144746..670318766545 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -33,10 +33,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
 	changed = !pmd_same(*(pmdp), entry);
 	if (changed) {
 		__ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry));
-		/*
-		 * Since we are not supporting SW TLB systems, we don't
-		 * have any thing similar to flush_tlb_page_nohash()
-		 */
+		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 	}
 	return changed;
 }
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 18b2c11604fa..e58707deef5c 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -160,9 +160,8 @@ redo:
 	process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT);
 	/*
 	 * Fill in the process table.
-	 * we support 52 bits, hence 52-28 = 24, 11000
 	 */
-	rts_field = 3ull << PPC_BITLSHIFT(2);
+	rts_field = radix__get_tree_size();
 	process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
 	/*
 	 * Fill in the partition table. We are suppose to use effective address
@@ -176,10 +175,8 @@ redo:
 static void __init radix_init_partition_table(void)
 {
 	unsigned long rts_field;
-	/*
-	 * we support 52 bits, hence 52-28 = 24, 11000
-	 */
-	rts_field = 3ull << PPC_BITLSHIFT(2);
+
+	rts_field = radix__get_tree_size();
 
 	BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 24), "Partition table size too large.");
 	partition_tb = early_alloc_pgtable(1UL << PATB_SIZE_SHIFT);
@@ -296,11 +293,6 @@ found:
 void __init radix__early_init_mmu(void)
 {
 	unsigned long lpcr;
-	/*
-	 * setup LPCR UPRT based on mmu_features
-	 */
-	lpcr = mfspr(SPRN_LPCR);
-	mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
 
 #ifdef CONFIG_PPC_64K_PAGES
 	/* PAGE_SIZE mappings */
@@ -343,8 +335,11 @@ void __init radix__early_init_mmu(void)
 	__pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;
 
 	radix_init_page_sizes();
-	if (!firmware_has_feature(FW_FEATURE_LPAR))
+	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
+		lpcr = mfspr(SPRN_LPCR);
+		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
 		radix_init_partition_table();
+	}
 
 	radix_init_pgtable();
 }
@@ -353,16 +348,15 @@ void radix__early_init_mmu_secondary(void)
353{ 348{
354 unsigned long lpcr; 349 unsigned long lpcr;
355 /* 350 /*
356 * setup LPCR UPRT based on mmu_features 351 * update partition table control register and UPRT
357 */
358 lpcr = mfspr(SPRN_LPCR);
359 mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
360 /*
361 * update partition table control register, 64 K size.
362 */ 352 */
363 if (!firmware_has_feature(FW_FEATURE_LPAR)) 353 if (!firmware_has_feature(FW_FEATURE_LPAR)) {
354 lpcr = mfspr(SPRN_LPCR);
355 mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
356
364 mtspr(SPRN_PTCR, 357 mtspr(SPRN_PTCR,
365 __pa(partition_tb) | (PATB_SIZE_SHIFT - 12)); 358 __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
359 }
366} 360}
367 361
368void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base, 362void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
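
Both tables previously open-coded rts_field = 3ull << PPC_BITLSHIFT(2) next to a confusing comment; the patch routes them through a single radix__get_tree_size() helper so the process and partition tables cannot disagree. A hedged sketch of what such a helper computes, assuming the ISA 3.0 RTS encoding (tree size = 2^(RTS + 31), so a 52-bit space needs RTS = 52 - 31 = 21 = 0b10101, split across two fields of the table entry); the exact bit positions below are an assumption, not taken from this patch:

    static unsigned long sketch_radix_tree_size(void)
    {
            unsigned long rts_field;

            rts_field  = 0x5UL << 5;    /* assumed slot for the low three RTS bits (0b101) */
            rts_field |= 0x2UL << 61;   /* assumed slot for the high two RTS bits (0b10)   */
            return rts_field;
    }
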
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index bf7bf32b54f8..7f922f557936 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -84,7 +84,7 @@ __init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long add
84 pte_t *pte; 84 pte_t *pte;
85 85
86 if (slab_is_available()) { 86 if (slab_is_available()) {
87 pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); 87 pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
88 } else { 88 } else {
89 pte = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE)); 89 pte = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
90 if (pte) 90 if (pte)
@@ -97,7 +97,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
97{ 97{
98 struct page *ptepage; 98 struct page *ptepage;
99 99
100 gfp_t flags = GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO; 100 gfp_t flags = GFP_KERNEL | __GFP_ZERO;
101 101
102 ptepage = alloc_pages(flags, 0); 102 ptepage = alloc_pages(flags, 0);
103 if (!ptepage) 103 if (!ptepage)
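
This and the similar hunks below drop __GFP_REPEAT from order-0 page-table allocations. The flag only influences costly requests (order above PAGE_ALLOC_COSTLY_ORDER), so for single pages it was a no-op; a minimal sketch of the unchanged behaviour:

    pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

    if (!pte)
            return NULL;    /* an order-0 request can still fail; __GFP_REPEAT never changed that */
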
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index e009e0604a8a..f5e8d4edb808 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -350,8 +350,7 @@ static pte_t *get_from_cache(struct mm_struct *mm)
350static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel) 350static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
351{ 351{
352 void *ret = NULL; 352 void *ret = NULL;
353 struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | 353 struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
354 __GFP_REPEAT | __GFP_ZERO);
355 if (!page) 354 if (!page)
356 return NULL; 355 return NULL;
357 if (!kernel && !pgtable_page_ctor(page)) { 356 if (!kernel && !pgtable_page_ctor(page)) {
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 0fdaf93a3e09..ab2f60e812e2 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -18,16 +18,20 @@
18 18
19static DEFINE_RAW_SPINLOCK(native_tlbie_lock); 19static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
20 20
21static inline void __tlbiel_pid(unsigned long pid, int set) 21#define RIC_FLUSH_TLB 0
22#define RIC_FLUSH_PWC 1
23#define RIC_FLUSH_ALL 2
24
25static inline void __tlbiel_pid(unsigned long pid, int set,
26 unsigned long ric)
22{ 27{
23 unsigned long rb,rs,ric,prs,r; 28 unsigned long rb,rs,prs,r;
24 29
25 rb = PPC_BIT(53); /* IS = 1 */ 30 rb = PPC_BIT(53); /* IS = 1 */
26 rb |= set << PPC_BITLSHIFT(51); 31 rb |= set << PPC_BITLSHIFT(51);
27 rs = ((unsigned long)pid) << PPC_BITLSHIFT(31); 32 rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
28 prs = 1; /* process scoped */ 33 prs = 1; /* process scoped */
29 r = 1; /* radix format */ 34 r = 1; /* radix format */
30 ric = 2; /* invalidate all the caches */
31 35
32 asm volatile("ptesync": : :"memory"); 36 asm volatile("ptesync": : :"memory");
33 asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |" 37 asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |"
@@ -39,25 +43,24 @@ static inline void __tlbiel_pid(unsigned long pid, int set)
39/* 43/*
40 * We use 128 sets in radix mode and 256 sets in hpt mode. 44 * We use 128 sets in radix mode and 256 sets in hpt mode.
41 */ 45 */
42static inline void _tlbiel_pid(unsigned long pid) 46static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
43{ 47{
44 int set; 48 int set;
45 49
46 for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) { 50 for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) {
47 __tlbiel_pid(pid, set); 51 __tlbiel_pid(pid, set, ric);
48 } 52 }
49 return; 53 return;
50} 54}
51 55
52static inline void _tlbie_pid(unsigned long pid) 56static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
53{ 57{
54 unsigned long rb,rs,ric,prs,r; 58 unsigned long rb,rs,prs,r;
55 59
56 rb = PPC_BIT(53); /* IS = 1 */ 60 rb = PPC_BIT(53); /* IS = 1 */
57 rs = pid << PPC_BITLSHIFT(31); 61 rs = pid << PPC_BITLSHIFT(31);
58 prs = 1; /* process scoped */ 62 prs = 1; /* process scoped */
59 r = 1; /* radix format */ 63 r = 1; /* radix format */
60 ric = 2; /* invalidate all the caches */
61 64
62 asm volatile("ptesync": : :"memory"); 65 asm volatile("ptesync": : :"memory");
63 asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |" 66 asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |"
@@ -67,16 +70,15 @@ static inline void _tlbie_pid(unsigned long pid)
67} 70}
68 71
69static inline void _tlbiel_va(unsigned long va, unsigned long pid, 72static inline void _tlbiel_va(unsigned long va, unsigned long pid,
70 unsigned long ap) 73 unsigned long ap, unsigned long ric)
71{ 74{
72 unsigned long rb,rs,ric,prs,r; 75 unsigned long rb,rs,prs,r;
73 76
74 rb = va & ~(PPC_BITMASK(52, 63)); 77 rb = va & ~(PPC_BITMASK(52, 63));
75 rb |= ap << PPC_BITLSHIFT(58); 78 rb |= ap << PPC_BITLSHIFT(58);
76 rs = pid << PPC_BITLSHIFT(31); 79 rs = pid << PPC_BITLSHIFT(31);
77 prs = 1; /* process scoped */ 80 prs = 1; /* process scoped */
78 r = 1; /* radix format */ 81 r = 1; /* radix format */
79 ric = 0; /* no cluster flush yet */
80 82
81 asm volatile("ptesync": : :"memory"); 83 asm volatile("ptesync": : :"memory");
82 asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |" 84 asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |"
@@ -86,16 +88,15 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid,
86} 88}
87 89
88static inline void _tlbie_va(unsigned long va, unsigned long pid, 90static inline void _tlbie_va(unsigned long va, unsigned long pid,
89 unsigned long ap) 91 unsigned long ap, unsigned long ric)
90{ 92{
91 unsigned long rb,rs,ric,prs,r; 93 unsigned long rb,rs,prs,r;
92 94
93 rb = va & ~(PPC_BITMASK(52, 63)); 95 rb = va & ~(PPC_BITMASK(52, 63));
94 rb |= ap << PPC_BITLSHIFT(58); 96 rb |= ap << PPC_BITLSHIFT(58);
95 rs = pid << PPC_BITLSHIFT(31); 97 rs = pid << PPC_BITLSHIFT(31);
96 prs = 1; /* process scoped */ 98 prs = 1; /* process scoped */
97 r = 1; /* radix format */ 99 r = 1; /* radix format */
98 ric = 0; /* no cluster flush yet */
99 100
100 asm volatile("ptesync": : :"memory"); 101 asm volatile("ptesync": : :"memory");
101 asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |" 102 asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |"
@@ -117,25 +118,40 @@ static inline void _tlbie_va(unsigned long va, unsigned long pid,
117 */ 118 */
118void radix__local_flush_tlb_mm(struct mm_struct *mm) 119void radix__local_flush_tlb_mm(struct mm_struct *mm)
119{ 120{
120 unsigned int pid; 121 unsigned long pid;
121 122
122 preempt_disable(); 123 preempt_disable();
123 pid = mm->context.id; 124 pid = mm->context.id;
124 if (pid != MMU_NO_CONTEXT) 125 if (pid != MMU_NO_CONTEXT)
125 _tlbiel_pid(pid); 126 _tlbiel_pid(pid, RIC_FLUSH_ALL);
126 preempt_enable(); 127 preempt_enable();
127} 128}
128EXPORT_SYMBOL(radix__local_flush_tlb_mm); 129EXPORT_SYMBOL(radix__local_flush_tlb_mm);
129 130
131void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
132{
133 unsigned long pid;
134 struct mm_struct *mm = tlb->mm;
135
136 preempt_disable();
137
138 pid = mm->context.id;
139 if (pid != MMU_NO_CONTEXT)
140 _tlbiel_pid(pid, RIC_FLUSH_PWC);
141
142 preempt_enable();
143}
144EXPORT_SYMBOL(radix__local_flush_tlb_pwc);
145
130void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, 146void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
131 unsigned long ap, int nid) 147 unsigned long ap, int nid)
132{ 148{
133 unsigned int pid; 149 unsigned long pid;
134 150
135 preempt_disable(); 151 preempt_disable();
136 pid = mm ? mm->context.id : 0; 152 pid = mm ? mm->context.id : 0;
137 if (pid != MMU_NO_CONTEXT) 153 if (pid != MMU_NO_CONTEXT)
138 _tlbiel_va(vmaddr, pid, ap); 154 _tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
139 preempt_enable(); 155 preempt_enable();
140} 156}
141 157
@@ -160,7 +176,7 @@ static int mm_is_core_local(struct mm_struct *mm)
160 176
161void radix__flush_tlb_mm(struct mm_struct *mm) 177void radix__flush_tlb_mm(struct mm_struct *mm)
162{ 178{
163 unsigned int pid; 179 unsigned long pid;
164 180
165 preempt_disable(); 181 preempt_disable();
166 pid = mm->context.id; 182 pid = mm->context.id;
@@ -172,20 +188,46 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
172 188
173 if (lock_tlbie) 189 if (lock_tlbie)
174 raw_spin_lock(&native_tlbie_lock); 190 raw_spin_lock(&native_tlbie_lock);
175 _tlbie_pid(pid); 191 _tlbie_pid(pid, RIC_FLUSH_ALL);
176 if (lock_tlbie) 192 if (lock_tlbie)
177 raw_spin_unlock(&native_tlbie_lock); 193 raw_spin_unlock(&native_tlbie_lock);
178 } else 194 } else
179 _tlbiel_pid(pid); 195 _tlbiel_pid(pid, RIC_FLUSH_ALL);
180no_context: 196no_context:
181 preempt_enable(); 197 preempt_enable();
182} 198}
183EXPORT_SYMBOL(radix__flush_tlb_mm); 199EXPORT_SYMBOL(radix__flush_tlb_mm);
184 200
201void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
202{
203 unsigned long pid;
204 struct mm_struct *mm = tlb->mm;
205
206 preempt_disable();
207
208 pid = mm->context.id;
209 if (unlikely(pid == MMU_NO_CONTEXT))
210 goto no_context;
211
212 if (!mm_is_core_local(mm)) {
213 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
214
215 if (lock_tlbie)
216 raw_spin_lock(&native_tlbie_lock);
217 _tlbie_pid(pid, RIC_FLUSH_PWC);
218 if (lock_tlbie)
219 raw_spin_unlock(&native_tlbie_lock);
220 } else
221 _tlbiel_pid(pid, RIC_FLUSH_PWC);
222no_context:
223 preempt_enable();
224}
225EXPORT_SYMBOL(radix__flush_tlb_pwc);
226
185void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, 227void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
186 unsigned long ap, int nid) 228 unsigned long ap, int nid)
187{ 229{
188 unsigned int pid; 230 unsigned long pid;
189 231
190 preempt_disable(); 232 preempt_disable();
191 pid = mm ? mm->context.id : 0; 233 pid = mm ? mm->context.id : 0;
@@ -196,11 +238,11 @@ void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
196 238
197 if (lock_tlbie) 239 if (lock_tlbie)
198 raw_spin_lock(&native_tlbie_lock); 240 raw_spin_lock(&native_tlbie_lock);
199 _tlbie_va(vmaddr, pid, ap); 241 _tlbie_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
200 if (lock_tlbie) 242 if (lock_tlbie)
201 raw_spin_unlock(&native_tlbie_lock); 243 raw_spin_unlock(&native_tlbie_lock);
202 } else 244 } else
203 _tlbiel_va(vmaddr, pid, ap); 245 _tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
204bail: 246bail:
205 preempt_enable(); 247 preempt_enable();
206} 248}
@@ -224,7 +266,7 @@ void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
224 266
225 if (lock_tlbie) 267 if (lock_tlbie)
226 raw_spin_lock(&native_tlbie_lock); 268 raw_spin_lock(&native_tlbie_lock);
227 _tlbie_pid(0); 269 _tlbie_pid(0, RIC_FLUSH_ALL);
228 if (lock_tlbie) 270 if (lock_tlbie)
229 raw_spin_unlock(&native_tlbie_lock); 271 raw_spin_unlock(&native_tlbie_lock);
230} 272}
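
The new RIC_* constants select what a tlbie/tlbiel invalidates: RIC_FLUSH_TLB removes TLB entries only, RIC_FLUSH_PWC the page-walk cache only, RIC_FLUSH_ALL both. Threading the value through lets the new radix__*_flush_tlb_pwc helpers avoid a full flush when only page tables were freed. How callers pick the value after this patch, in sketch form:

    _tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);  /* one page: TLB entry only           */
    _tlbiel_pid(pid, RIC_FLUSH_PWC);             /* page tables freed: walk cache only */
    _tlbiel_pid(pid, RIC_FLUSH_ALL);             /* full teardown: TLB + walk cache    */
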
diff --git a/arch/powerpc/platforms/512x/clock-commonclk.c b/arch/powerpc/platforms/512x/clock-commonclk.c
index c50ea76ba66c..6081fbd75330 100644
--- a/arch/powerpc/platforms/512x/clock-commonclk.c
+++ b/arch/powerpc/platforms/512x/clock-commonclk.c
@@ -221,7 +221,7 @@ static bool soc_has_mclk_mux0_canin(void)
221/* convenience wrappers around the common clk API */ 221/* convenience wrappers around the common clk API */
222static inline struct clk *mpc512x_clk_fixed(const char *name, int rate) 222static inline struct clk *mpc512x_clk_fixed(const char *name, int rate)
223{ 223{
224 return clk_register_fixed_rate(NULL, name, NULL, CLK_IS_ROOT, rate); 224 return clk_register_fixed_rate(NULL, name, NULL, 0, rate);
225} 225}
226 226
227static inline struct clk *mpc512x_clk_factor( 227static inline struct clk *mpc512x_clk_factor(
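
CLK_IS_ROOT had become a no-op in the common clock framework (a clock registered with no parent name is a root by construction), so the flag argument is now simply 0. A small usage sketch; "ref24m" is a hypothetical clock name:

    /* NULL parent_name is what marks the clock as a root; no flag needed */
    struct clk *ref = clk_register_fixed_rate(NULL, "ref24m", NULL, 0, 24000000);

    if (!IS_ERR(ref))
            pr_info("ref24m: %lu Hz\n", clk_get_rate(ref));
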
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
index 84fb984f29c1..85c85eb3e245 100644
--- a/arch/powerpc/platforms/cell/spufs/coredump.c
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -172,7 +172,7 @@ static int spufs_arch_write_note(struct spu_context *ctx, int i,
172 if (rc < 0) 172 if (rc < 0)
173 goto out; 173 goto out;
174 174
175 skip = roundup(cprm->file->f_pos - total + sz, 4) - cprm->file->f_pos; 175 skip = roundup(cprm->pos - total + sz, 4) - cprm->pos;
176 if (!dump_skip(cprm, skip)) 176 if (!dump_skip(cprm, skip))
177 goto Eio; 177 goto Eio;
178out: 178out:
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index ac3ffd97e059..3998e0f9a03b 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -53,7 +53,6 @@ static int ibm_read_slot_reset_state2;
53static int ibm_slot_error_detail; 53static int ibm_slot_error_detail;
54static int ibm_get_config_addr_info; 54static int ibm_get_config_addr_info;
55static int ibm_get_config_addr_info2; 55static int ibm_get_config_addr_info2;
56static int ibm_configure_bridge;
57static int ibm_configure_pe; 56static int ibm_configure_pe;
58 57
59/* 58/*
@@ -81,7 +80,14 @@ static int pseries_eeh_init(void)
81 ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2"); 80 ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2");
82 ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info"); 81 ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info");
83 ibm_configure_pe = rtas_token("ibm,configure-pe"); 82 ibm_configure_pe = rtas_token("ibm,configure-pe");
84 ibm_configure_bridge = rtas_token("ibm,configure-bridge"); 83
84 /*
85 * ibm,configure-pe and ibm,configure-bridge have the same semantics,
86 * however ibm,configure-pe can be faster. If we can't find
87 * ibm,configure-pe then fall back to using ibm,configure-bridge.
88 */
89 if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE)
90 ibm_configure_pe = rtas_token("ibm,configure-bridge");
85 91
86 /* 92 /*
87 * Necessary sanity check. We needn't check "get-config-addr-info" 93 * Necessary sanity check. We needn't check "get-config-addr-info"
@@ -93,8 +99,7 @@ static int pseries_eeh_init(void)
93 (ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE && 99 (ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE &&
94 ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE) || 100 ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE) ||
95 ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE || 101 ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE ||
96 (ibm_configure_pe == RTAS_UNKNOWN_SERVICE && 102 ibm_configure_pe == RTAS_UNKNOWN_SERVICE) {
97 ibm_configure_bridge == RTAS_UNKNOWN_SERVICE)) {
98 pr_info("EEH functionality not supported\n"); 103 pr_info("EEH functionality not supported\n");
99 return -EINVAL; 104 return -EINVAL;
100 } 105 }
@@ -615,29 +620,41 @@ static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
615{ 620{
616 int config_addr; 621 int config_addr;
617 int ret; 622 int ret;
623 /* Waiting 0.2s maximum before skipping configuration */
624 int max_wait = 200;
618 625
619 /* Figure out the PE address */ 626 /* Figure out the PE address */
620 config_addr = pe->config_addr; 627 config_addr = pe->config_addr;
621 if (pe->addr) 628 if (pe->addr)
622 config_addr = pe->addr; 629 config_addr = pe->addr;
623 630
624 /* Use new configure-pe function, if supported */ 631 while (max_wait > 0) {
625 if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
626 ret = rtas_call(ibm_configure_pe, 3, 1, NULL, 632 ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
627 config_addr, BUID_HI(pe->phb->buid), 633 config_addr, BUID_HI(pe->phb->buid),
628 BUID_LO(pe->phb->buid)); 634 BUID_LO(pe->phb->buid));
629 } else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
630 ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
631 config_addr, BUID_HI(pe->phb->buid),
632 BUID_LO(pe->phb->buid));
633 } else {
634 return -EFAULT;
635 }
636 635
637 if (ret) 636 if (!ret)
638 pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n", 637 return ret;
639 __func__, pe->phb->global_number, pe->addr, ret); 638
639 /*
640 * If RTAS returns a delay value that's above 100ms, cut it
641 * down to 100ms in case firmware made a mistake. For more
642 * on how these delay values work see rtas_busy_delay_time
643 */
644 if (ret > RTAS_EXTENDED_DELAY_MIN+2 &&
645 ret <= RTAS_EXTENDED_DELAY_MAX)
646 ret = RTAS_EXTENDED_DELAY_MIN+2;
647
648 max_wait -= rtas_busy_delay_time(ret);
649
650 if (max_wait < 0)
651 break;
652
653 rtas_busy_delay(ret);
654 }
640 655
656 pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
657 __func__, pe->phb->global_number, pe->addr, ret);
641 return ret; 658 return ret;
642} 659}
643 660
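
The rewritten pseries_eeh_configure_bridge() follows the standard RTAS busy-wait idiom: rtas_busy_delay_time() translates a busy/extended-delay status into milliseconds and rtas_busy_delay() sleeps for it, here with total waiting capped at 200 ms and per-iteration delays clamped to roughly 100 ms. The generic shape of the idiom, as a sketch (the token and argument list are placeholders, not the real call):

    static int rtas_call_with_budget(int token, u32 arg, int budget_ms)
    {
            int rc;

            for (;;) {
                    rc = rtas_call(token, 1, 1, NULL, arg);
                    if (!rtas_busy_delay_time(rc))
                            return rc;              /* done, or a hard error          */
                    budget_ms -= rtas_busy_delay_time(rc);
                    if (budget_ms <= 0)
                            return rc;              /* budget exhausted, report status */
                    rtas_busy_delay(rc);            /* msleep() for the suggested time */
            }
    }
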
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index b7dfc1359d01..3e8865b187de 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -927,7 +927,7 @@ static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
927 dn = pci_device_to_OF_node(dev); 927 dn = pci_device_to_OF_node(dev);
928 pdn = PCI_DN(dn); 928 pdn = PCI_DN(dn);
929 buid = pdn->phb->buid; 929 buid = pdn->phb->buid;
930 cfg_addr = (pdn->busno << 8) | pdn->devfn; 930 cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));
931 931
932 ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query, 932 ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
933 cfg_addr, BUID_HI(buid), BUID_LO(buid)); 933 cfg_addr, BUID_HI(buid), BUID_LO(buid));
@@ -956,7 +956,7 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
956 dn = pci_device_to_OF_node(dev); 956 dn = pci_device_to_OF_node(dev);
957 pdn = PCI_DN(dn); 957 pdn = PCI_DN(dn);
958 buid = pdn->phb->buid; 958 buid = pdn->phb->buid;
959 cfg_addr = (pdn->busno << 8) | pdn->devfn; 959 cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));
960 960
961 do { 961 do {
962 /* extra outputs are LIOBN and dma-addr (hi, lo) */ 962 /* extra outputs are LIOBN and dma-addr (hi, lo) */
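
The RTAS dynamic-DMA-window calls expect the canonical RTAS config address layout: register number in bits 0-7, devfn in bits 8-15, bus in bits 16-23. The old code packed bus and devfn one byte too low. Worked example with hypothetical values:

    u32 busno = 0x42, devfn = 0x08;                 /* device 1, function 0        */
    u32 cfg_addr = (busno << 16) | (devfn << 8);    /* fixed:      0x00420800      */
    /* old, wrong: (busno << 8) | devfn          -> 0x00004208, bus in devfn byte */
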
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index 0ac42cc4f880..d5ec71b2ed02 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -1,8 +1,7 @@
1CONFIG_SYSVIPC=y 1CONFIG_SYSVIPC=y
2CONFIG_POSIX_MQUEUE=y 2CONFIG_POSIX_MQUEUE=y
3CONFIG_FHANDLE=y
4CONFIG_AUDIT=y 3CONFIG_AUDIT=y
5CONFIG_NO_HZ=y 4CONFIG_NO_HZ_IDLE=y
6CONFIG_HIGH_RES_TIMERS=y 5CONFIG_HIGH_RES_TIMERS=y
7CONFIG_BSD_PROCESS_ACCT=y 6CONFIG_BSD_PROCESS_ACCT=y
8CONFIG_BSD_PROCESS_ACCT_V3=y 7CONFIG_BSD_PROCESS_ACCT_V3=y
@@ -13,19 +12,19 @@ CONFIG_TASK_IO_ACCOUNTING=y
13CONFIG_IKCONFIG=y 12CONFIG_IKCONFIG=y
14CONFIG_IKCONFIG_PROC=y 13CONFIG_IKCONFIG_PROC=y
15CONFIG_NUMA_BALANCING=y 14CONFIG_NUMA_BALANCING=y
16CONFIG_CGROUP_FREEZER=y
17CONFIG_CGROUP_PIDS=y
18CONFIG_CGROUP_DEVICE=y
19CONFIG_CPUSETS=y
20CONFIG_CGROUP_CPUACCT=y
21CONFIG_MEMCG=y 15CONFIG_MEMCG=y
22CONFIG_MEMCG_SWAP=y 16CONFIG_MEMCG_SWAP=y
23CONFIG_MEMCG_KMEM=y 17CONFIG_BLK_CGROUP=y
24CONFIG_CGROUP_HUGETLB=y
25CONFIG_CGROUP_PERF=y
26CONFIG_CFS_BANDWIDTH=y 18CONFIG_CFS_BANDWIDTH=y
27CONFIG_RT_GROUP_SCHED=y 19CONFIG_RT_GROUP_SCHED=y
28CONFIG_BLK_CGROUP=y 20CONFIG_CGROUP_PIDS=y
21CONFIG_CGROUP_FREEZER=y
22CONFIG_CGROUP_HUGETLB=y
23CONFIG_CPUSETS=y
24CONFIG_CGROUP_DEVICE=y
25CONFIG_CGROUP_CPUACCT=y
26CONFIG_CGROUP_PERF=y
27CONFIG_CHECKPOINT_RESTORE=y
29CONFIG_NAMESPACES=y 28CONFIG_NAMESPACES=y
30CONFIG_USER_NS=y 29CONFIG_USER_NS=y
31CONFIG_SCHED_AUTOGROUP=y 30CONFIG_SCHED_AUTOGROUP=y
@@ -55,7 +54,6 @@ CONFIG_UNIXWARE_DISKLABEL=y
55CONFIG_CFQ_GROUP_IOSCHED=y 54CONFIG_CFQ_GROUP_IOSCHED=y
56CONFIG_DEFAULT_DEADLINE=y 55CONFIG_DEFAULT_DEADLINE=y
57CONFIG_LIVEPATCH=y 56CONFIG_LIVEPATCH=y
58CONFIG_MARCH_Z196=y
59CONFIG_TUNE_ZEC12=y 57CONFIG_TUNE_ZEC12=y
60CONFIG_NR_CPUS=256 58CONFIG_NR_CPUS=256
61CONFIG_NUMA=y 59CONFIG_NUMA=y
@@ -65,6 +63,15 @@ CONFIG_MEMORY_HOTPLUG=y
65CONFIG_MEMORY_HOTREMOVE=y 63CONFIG_MEMORY_HOTREMOVE=y
66CONFIG_KSM=y 64CONFIG_KSM=y
67CONFIG_TRANSPARENT_HUGEPAGE=y 65CONFIG_TRANSPARENT_HUGEPAGE=y
66CONFIG_CLEANCACHE=y
67CONFIG_FRONTSWAP=y
68CONFIG_CMA=y
69CONFIG_MEM_SOFT_DIRTY=y
70CONFIG_ZPOOL=m
71CONFIG_ZBUD=m
72CONFIG_ZSMALLOC=m
73CONFIG_ZSMALLOC_STAT=y
74CONFIG_IDLE_PAGE_TRACKING=y
68CONFIG_PCI=y 75CONFIG_PCI=y
69CONFIG_PCI_DEBUG=y 76CONFIG_PCI_DEBUG=y
70CONFIG_HOTPLUG_PCI=y 77CONFIG_HOTPLUG_PCI=y
@@ -452,6 +459,7 @@ CONFIG_HW_RANDOM_VIRTIO=m
452CONFIG_RAW_DRIVER=m 459CONFIG_RAW_DRIVER=m
453CONFIG_HANGCHECK_TIMER=m 460CONFIG_HANGCHECK_TIMER=m
454CONFIG_TN3270_FS=y 461CONFIG_TN3270_FS=y
462# CONFIG_HWMON is not set
455CONFIG_WATCHDOG=y 463CONFIG_WATCHDOG=y
456CONFIG_WATCHDOG_NOWAYOUT=y 464CONFIG_WATCHDOG_NOWAYOUT=y
457CONFIG_SOFT_WATCHDOG=m 465CONFIG_SOFT_WATCHDOG=m
@@ -537,6 +545,8 @@ CONFIG_DLM=m
537CONFIG_PRINTK_TIME=y 545CONFIG_PRINTK_TIME=y
538CONFIG_DYNAMIC_DEBUG=y 546CONFIG_DYNAMIC_DEBUG=y
539CONFIG_DEBUG_INFO=y 547CONFIG_DEBUG_INFO=y
548CONFIG_DEBUG_INFO_DWARF4=y
549CONFIG_GDB_SCRIPTS=y
540CONFIG_FRAME_WARN=1024 550CONFIG_FRAME_WARN=1024
541CONFIG_READABLE_ASM=y 551CONFIG_READABLE_ASM=y
542CONFIG_UNUSED_SYMBOLS=y 552CONFIG_UNUSED_SYMBOLS=y
@@ -555,13 +565,17 @@ CONFIG_SLUB_DEBUG_ON=y
555CONFIG_SLUB_STATS=y 565CONFIG_SLUB_STATS=y
556CONFIG_DEBUG_STACK_USAGE=y 566CONFIG_DEBUG_STACK_USAGE=y
557CONFIG_DEBUG_VM=y 567CONFIG_DEBUG_VM=y
568CONFIG_DEBUG_VM_VMACACHE=y
558CONFIG_DEBUG_VM_RB=y 569CONFIG_DEBUG_VM_RB=y
570CONFIG_DEBUG_VM_PGFLAGS=y
559CONFIG_DEBUG_MEMORY_INIT=y 571CONFIG_DEBUG_MEMORY_INIT=y
560CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m 572CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
561CONFIG_DEBUG_PER_CPU_MAPS=y 573CONFIG_DEBUG_PER_CPU_MAPS=y
562CONFIG_DEBUG_SHIRQ=y 574CONFIG_DEBUG_SHIRQ=y
563CONFIG_DETECT_HUNG_TASK=y 575CONFIG_DETECT_HUNG_TASK=y
576CONFIG_WQ_WATCHDOG=y
564CONFIG_PANIC_ON_OOPS=y 577CONFIG_PANIC_ON_OOPS=y
578CONFIG_DEBUG_TIMEKEEPING=y
565CONFIG_TIMER_STATS=y 579CONFIG_TIMER_STATS=y
566CONFIG_DEBUG_RT_MUTEXES=y 580CONFIG_DEBUG_RT_MUTEXES=y
567CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y 581CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
@@ -596,6 +610,8 @@ CONFIG_FTRACE_SYSCALLS=y
596CONFIG_STACK_TRACER=y 610CONFIG_STACK_TRACER=y
597CONFIG_BLK_DEV_IO_TRACE=y 611CONFIG_BLK_DEV_IO_TRACE=y
598CONFIG_UPROBE_EVENT=y 612CONFIG_UPROBE_EVENT=y
613CONFIG_FUNCTION_PROFILER=y
614CONFIG_TRACE_ENUM_MAP_FILE=y
599CONFIG_LKDTM=m 615CONFIG_LKDTM=m
600CONFIG_TEST_LIST_SORT=y 616CONFIG_TEST_LIST_SORT=y
601CONFIG_KPROBES_SANITY_TEST=y 617CONFIG_KPROBES_SANITY_TEST=y
@@ -607,7 +623,6 @@ CONFIG_TEST_STRING_HELPERS=y
607CONFIG_TEST_KSTRTOX=y 623CONFIG_TEST_KSTRTOX=y
608CONFIG_DMA_API_DEBUG=y 624CONFIG_DMA_API_DEBUG=y
609CONFIG_TEST_BPF=m 625CONFIG_TEST_BPF=m
610# CONFIG_STRICT_DEVMEM is not set
611CONFIG_S390_PTDUMP=y 626CONFIG_S390_PTDUMP=y
612CONFIG_ENCRYPTED_KEYS=m 627CONFIG_ENCRYPTED_KEYS=m
613CONFIG_SECURITY=y 628CONFIG_SECURITY=y
@@ -651,7 +666,6 @@ CONFIG_CRYPTO_SEED=m
651CONFIG_CRYPTO_SERPENT=m 666CONFIG_CRYPTO_SERPENT=m
652CONFIG_CRYPTO_TEA=m 667CONFIG_CRYPTO_TEA=m
653CONFIG_CRYPTO_TWOFISH=m 668CONFIG_CRYPTO_TWOFISH=m
654CONFIG_CRYPTO_ZLIB=y
655CONFIG_CRYPTO_LZO=m 669CONFIG_CRYPTO_LZO=m
656CONFIG_CRYPTO_LZ4=m 670CONFIG_CRYPTO_LZ4=m
657CONFIG_CRYPTO_LZ4HC=m 671CONFIG_CRYPTO_LZ4HC=m
@@ -664,7 +678,7 @@ CONFIG_CRYPTO_SHA512_S390=m
664CONFIG_CRYPTO_DES_S390=m 678CONFIG_CRYPTO_DES_S390=m
665CONFIG_CRYPTO_AES_S390=m 679CONFIG_CRYPTO_AES_S390=m
666CONFIG_CRYPTO_GHASH_S390=m 680CONFIG_CRYPTO_GHASH_S390=m
667CONFIG_ASYMMETRIC_KEY_TYPE=m 681CONFIG_ASYMMETRIC_KEY_TYPE=y
668CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m 682CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
669CONFIG_X509_CERTIFICATE_PARSER=m 683CONFIG_X509_CERTIFICATE_PARSER=m
670CONFIG_CRC7=m 684CONFIG_CRC7=m
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index a31dcd56f7c0..f46a35115d2d 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -1,8 +1,7 @@
1CONFIG_SYSVIPC=y 1CONFIG_SYSVIPC=y
2CONFIG_POSIX_MQUEUE=y 2CONFIG_POSIX_MQUEUE=y
3CONFIG_FHANDLE=y
4CONFIG_AUDIT=y 3CONFIG_AUDIT=y
5CONFIG_NO_HZ=y 4CONFIG_NO_HZ_IDLE=y
6CONFIG_HIGH_RES_TIMERS=y 5CONFIG_HIGH_RES_TIMERS=y
7CONFIG_BSD_PROCESS_ACCT=y 6CONFIG_BSD_PROCESS_ACCT=y
8CONFIG_BSD_PROCESS_ACCT_V3=y 7CONFIG_BSD_PROCESS_ACCT_V3=y
@@ -13,17 +12,17 @@ CONFIG_TASK_IO_ACCOUNTING=y
13CONFIG_IKCONFIG=y 12CONFIG_IKCONFIG=y
14CONFIG_IKCONFIG_PROC=y 13CONFIG_IKCONFIG_PROC=y
15CONFIG_NUMA_BALANCING=y 14CONFIG_NUMA_BALANCING=y
16CONFIG_CGROUP_FREEZER=y
17CONFIG_CGROUP_PIDS=y
18CONFIG_CGROUP_DEVICE=y
19CONFIG_CPUSETS=y
20CONFIG_CGROUP_CPUACCT=y
21CONFIG_MEMCG=y 15CONFIG_MEMCG=y
22CONFIG_MEMCG_SWAP=y 16CONFIG_MEMCG_SWAP=y
23CONFIG_MEMCG_KMEM=y 17CONFIG_BLK_CGROUP=y
18CONFIG_CGROUP_PIDS=y
19CONFIG_CGROUP_FREEZER=y
24CONFIG_CGROUP_HUGETLB=y 20CONFIG_CGROUP_HUGETLB=y
21CONFIG_CPUSETS=y
22CONFIG_CGROUP_DEVICE=y
23CONFIG_CGROUP_CPUACCT=y
25CONFIG_CGROUP_PERF=y 24CONFIG_CGROUP_PERF=y
26CONFIG_BLK_CGROUP=y 25CONFIG_CHECKPOINT_RESTORE=y
27CONFIG_NAMESPACES=y 26CONFIG_NAMESPACES=y
28CONFIG_USER_NS=y 27CONFIG_USER_NS=y
29CONFIG_SCHED_AUTOGROUP=y 28CONFIG_SCHED_AUTOGROUP=y
@@ -53,7 +52,6 @@ CONFIG_SOLARIS_X86_PARTITION=y
53CONFIG_UNIXWARE_DISKLABEL=y 52CONFIG_UNIXWARE_DISKLABEL=y
54CONFIG_CFQ_GROUP_IOSCHED=y 53CONFIG_CFQ_GROUP_IOSCHED=y
55CONFIG_DEFAULT_DEADLINE=y 54CONFIG_DEFAULT_DEADLINE=y
56CONFIG_MARCH_Z196=y
57CONFIG_TUNE_ZEC12=y 55CONFIG_TUNE_ZEC12=y
58CONFIG_NR_CPUS=256 56CONFIG_NR_CPUS=256
59CONFIG_NUMA=y 57CONFIG_NUMA=y
@@ -62,6 +60,14 @@ CONFIG_MEMORY_HOTPLUG=y
62CONFIG_MEMORY_HOTREMOVE=y 60CONFIG_MEMORY_HOTREMOVE=y
63CONFIG_KSM=y 61CONFIG_KSM=y
64CONFIG_TRANSPARENT_HUGEPAGE=y 62CONFIG_TRANSPARENT_HUGEPAGE=y
63CONFIG_CLEANCACHE=y
64CONFIG_FRONTSWAP=y
65CONFIG_CMA=y
66CONFIG_ZSWAP=y
67CONFIG_ZBUD=m
68CONFIG_ZSMALLOC=m
69CONFIG_ZSMALLOC_STAT=y
70CONFIG_IDLE_PAGE_TRACKING=y
65CONFIG_PCI=y 71CONFIG_PCI=y
66CONFIG_HOTPLUG_PCI=y 72CONFIG_HOTPLUG_PCI=y
67CONFIG_HOTPLUG_PCI_S390=y 73CONFIG_HOTPLUG_PCI_S390=y
@@ -530,6 +536,8 @@ CONFIG_NLS_UTF8=m
530CONFIG_DLM=m 536CONFIG_DLM=m
531CONFIG_PRINTK_TIME=y 537CONFIG_PRINTK_TIME=y
532CONFIG_DEBUG_INFO=y 538CONFIG_DEBUG_INFO=y
539CONFIG_DEBUG_INFO_DWARF4=y
540CONFIG_GDB_SCRIPTS=y
533# CONFIG_ENABLE_MUST_CHECK is not set 541# CONFIG_ENABLE_MUST_CHECK is not set
534CONFIG_FRAME_WARN=1024 542CONFIG_FRAME_WARN=1024
535CONFIG_UNUSED_SYMBOLS=y 543CONFIG_UNUSED_SYMBOLS=y
@@ -547,13 +555,13 @@ CONFIG_LATENCYTOP=y
547CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y 555CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
548CONFIG_BLK_DEV_IO_TRACE=y 556CONFIG_BLK_DEV_IO_TRACE=y
549# CONFIG_KPROBE_EVENT is not set 557# CONFIG_KPROBE_EVENT is not set
558CONFIG_TRACE_ENUM_MAP_FILE=y
550CONFIG_LKDTM=m 559CONFIG_LKDTM=m
551CONFIG_RBTREE_TEST=m 560CONFIG_RBTREE_TEST=m
552CONFIG_INTERVAL_TREE_TEST=m 561CONFIG_INTERVAL_TREE_TEST=m
553CONFIG_PERCPU_TEST=m 562CONFIG_PERCPU_TEST=m
554CONFIG_ATOMIC64_SELFTEST=y 563CONFIG_ATOMIC64_SELFTEST=y
555CONFIG_TEST_BPF=m 564CONFIG_TEST_BPF=m
556# CONFIG_STRICT_DEVMEM is not set
557CONFIG_S390_PTDUMP=y 565CONFIG_S390_PTDUMP=y
558CONFIG_ENCRYPTED_KEYS=m 566CONFIG_ENCRYPTED_KEYS=m
559CONFIG_SECURITY=y 567CONFIG_SECURITY=y
@@ -597,8 +605,6 @@ CONFIG_CRYPTO_SEED=m
597CONFIG_CRYPTO_SERPENT=m 605CONFIG_CRYPTO_SERPENT=m
598CONFIG_CRYPTO_TEA=m 606CONFIG_CRYPTO_TEA=m
599CONFIG_CRYPTO_TWOFISH=m 607CONFIG_CRYPTO_TWOFISH=m
600CONFIG_CRYPTO_ZLIB=y
601CONFIG_CRYPTO_LZO=m
602CONFIG_CRYPTO_LZ4=m 608CONFIG_CRYPTO_LZ4=m
603CONFIG_CRYPTO_LZ4HC=m 609CONFIG_CRYPTO_LZ4HC=m
604CONFIG_CRYPTO_USER_API_HASH=m 610CONFIG_CRYPTO_USER_API_HASH=m
@@ -610,7 +616,7 @@ CONFIG_CRYPTO_SHA512_S390=m
610CONFIG_CRYPTO_DES_S390=m 616CONFIG_CRYPTO_DES_S390=m
611CONFIG_CRYPTO_AES_S390=m 617CONFIG_CRYPTO_AES_S390=m
612CONFIG_CRYPTO_GHASH_S390=m 618CONFIG_CRYPTO_GHASH_S390=m
613CONFIG_ASYMMETRIC_KEY_TYPE=m 619CONFIG_ASYMMETRIC_KEY_TYPE=y
614CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m 620CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
615CONFIG_X509_CERTIFICATE_PARSER=m 621CONFIG_X509_CERTIFICATE_PARSER=m
616CONFIG_CRC7=m 622CONFIG_CRC7=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index 7b73bf353345..ba0f2a58b8cd 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -1,8 +1,7 @@
1CONFIG_SYSVIPC=y 1CONFIG_SYSVIPC=y
2CONFIG_POSIX_MQUEUE=y 2CONFIG_POSIX_MQUEUE=y
3CONFIG_FHANDLE=y
4CONFIG_AUDIT=y 3CONFIG_AUDIT=y
5CONFIG_NO_HZ=y 4CONFIG_NO_HZ_IDLE=y
6CONFIG_HIGH_RES_TIMERS=y 5CONFIG_HIGH_RES_TIMERS=y
7CONFIG_BSD_PROCESS_ACCT=y 6CONFIG_BSD_PROCESS_ACCT=y
8CONFIG_BSD_PROCESS_ACCT_V3=y 7CONFIG_BSD_PROCESS_ACCT_V3=y
@@ -14,17 +13,17 @@ CONFIG_IKCONFIG=y
14CONFIG_IKCONFIG_PROC=y 13CONFIG_IKCONFIG_PROC=y
15CONFIG_NUMA_BALANCING=y 14CONFIG_NUMA_BALANCING=y
16# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set 15# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set
17CONFIG_CGROUP_FREEZER=y
18CONFIG_CGROUP_PIDS=y
19CONFIG_CGROUP_DEVICE=y
20CONFIG_CPUSETS=y
21CONFIG_CGROUP_CPUACCT=y
22CONFIG_MEMCG=y 16CONFIG_MEMCG=y
23CONFIG_MEMCG_SWAP=y 17CONFIG_MEMCG_SWAP=y
24CONFIG_MEMCG_KMEM=y 18CONFIG_BLK_CGROUP=y
19CONFIG_CGROUP_PIDS=y
20CONFIG_CGROUP_FREEZER=y
25CONFIG_CGROUP_HUGETLB=y 21CONFIG_CGROUP_HUGETLB=y
22CONFIG_CPUSETS=y
23CONFIG_CGROUP_DEVICE=y
24CONFIG_CGROUP_CPUACCT=y
26CONFIG_CGROUP_PERF=y 25CONFIG_CGROUP_PERF=y
27CONFIG_BLK_CGROUP=y 26CONFIG_CHECKPOINT_RESTORE=y
28CONFIG_NAMESPACES=y 27CONFIG_NAMESPACES=y
29CONFIG_USER_NS=y 28CONFIG_USER_NS=y
30CONFIG_SCHED_AUTOGROUP=y 29CONFIG_SCHED_AUTOGROUP=y
@@ -53,7 +52,6 @@ CONFIG_UNIXWARE_DISKLABEL=y
53CONFIG_CFQ_GROUP_IOSCHED=y 52CONFIG_CFQ_GROUP_IOSCHED=y
54CONFIG_DEFAULT_DEADLINE=y 53CONFIG_DEFAULT_DEADLINE=y
55CONFIG_LIVEPATCH=y 54CONFIG_LIVEPATCH=y
56CONFIG_MARCH_Z196=y
57CONFIG_TUNE_ZEC12=y 55CONFIG_TUNE_ZEC12=y
58CONFIG_NR_CPUS=512 56CONFIG_NR_CPUS=512
59CONFIG_NUMA=y 57CONFIG_NUMA=y
@@ -62,6 +60,14 @@ CONFIG_MEMORY_HOTPLUG=y
62CONFIG_MEMORY_HOTREMOVE=y 60CONFIG_MEMORY_HOTREMOVE=y
63CONFIG_KSM=y 61CONFIG_KSM=y
64CONFIG_TRANSPARENT_HUGEPAGE=y 62CONFIG_TRANSPARENT_HUGEPAGE=y
63CONFIG_CLEANCACHE=y
64CONFIG_FRONTSWAP=y
65CONFIG_CMA=y
66CONFIG_ZSWAP=y
67CONFIG_ZBUD=m
68CONFIG_ZSMALLOC=m
69CONFIG_ZSMALLOC_STAT=y
70CONFIG_IDLE_PAGE_TRACKING=y
65CONFIG_PCI=y 71CONFIG_PCI=y
66CONFIG_HOTPLUG_PCI=y 72CONFIG_HOTPLUG_PCI=y
67CONFIG_HOTPLUG_PCI_S390=y 73CONFIG_HOTPLUG_PCI_S390=y
@@ -447,6 +453,7 @@ CONFIG_HW_RANDOM_VIRTIO=m
447CONFIG_RAW_DRIVER=m 453CONFIG_RAW_DRIVER=m
448CONFIG_HANGCHECK_TIMER=m 454CONFIG_HANGCHECK_TIMER=m
449CONFIG_TN3270_FS=y 455CONFIG_TN3270_FS=y
456# CONFIG_HWMON is not set
450CONFIG_WATCHDOG=y 457CONFIG_WATCHDOG=y
451CONFIG_WATCHDOG_NOWAYOUT=y 458CONFIG_WATCHDOG_NOWAYOUT=y
452CONFIG_SOFT_WATCHDOG=m 459CONFIG_SOFT_WATCHDOG=m
@@ -530,6 +537,8 @@ CONFIG_NLS_UTF8=m
530CONFIG_DLM=m 537CONFIG_DLM=m
531CONFIG_PRINTK_TIME=y 538CONFIG_PRINTK_TIME=y
532CONFIG_DEBUG_INFO=y 539CONFIG_DEBUG_INFO=y
540CONFIG_DEBUG_INFO_DWARF4=y
541CONFIG_GDB_SCRIPTS=y
533# CONFIG_ENABLE_MUST_CHECK is not set 542# CONFIG_ENABLE_MUST_CHECK is not set
534CONFIG_FRAME_WARN=1024 543CONFIG_FRAME_WARN=1024
535CONFIG_UNUSED_SYMBOLS=y 544CONFIG_UNUSED_SYMBOLS=y
@@ -546,11 +555,12 @@ CONFIG_FTRACE_SYSCALLS=y
546CONFIG_STACK_TRACER=y 555CONFIG_STACK_TRACER=y
547CONFIG_BLK_DEV_IO_TRACE=y 556CONFIG_BLK_DEV_IO_TRACE=y
548CONFIG_UPROBE_EVENT=y 557CONFIG_UPROBE_EVENT=y
558CONFIG_FUNCTION_PROFILER=y
559CONFIG_TRACE_ENUM_MAP_FILE=y
549CONFIG_LKDTM=m 560CONFIG_LKDTM=m
550CONFIG_PERCPU_TEST=m 561CONFIG_PERCPU_TEST=m
551CONFIG_ATOMIC64_SELFTEST=y 562CONFIG_ATOMIC64_SELFTEST=y
552CONFIG_TEST_BPF=m 563CONFIG_TEST_BPF=m
553# CONFIG_STRICT_DEVMEM is not set
554CONFIG_S390_PTDUMP=y 564CONFIG_S390_PTDUMP=y
555CONFIG_ENCRYPTED_KEYS=m 565CONFIG_ENCRYPTED_KEYS=m
556CONFIG_SECURITY=y 566CONFIG_SECURITY=y
@@ -594,8 +604,6 @@ CONFIG_CRYPTO_SEED=m
594CONFIG_CRYPTO_SERPENT=m 604CONFIG_CRYPTO_SERPENT=m
595CONFIG_CRYPTO_TEA=m 605CONFIG_CRYPTO_TEA=m
596CONFIG_CRYPTO_TWOFISH=m 606CONFIG_CRYPTO_TWOFISH=m
597CONFIG_CRYPTO_ZLIB=y
598CONFIG_CRYPTO_LZO=m
599CONFIG_CRYPTO_LZ4=m 607CONFIG_CRYPTO_LZ4=m
600CONFIG_CRYPTO_LZ4HC=m 608CONFIG_CRYPTO_LZ4HC=m
601CONFIG_CRYPTO_USER_API_HASH=m 609CONFIG_CRYPTO_USER_API_HASH=m
@@ -607,7 +615,7 @@ CONFIG_CRYPTO_SHA512_S390=m
607CONFIG_CRYPTO_DES_S390=m 615CONFIG_CRYPTO_DES_S390=m
608CONFIG_CRYPTO_AES_S390=m 616CONFIG_CRYPTO_AES_S390=m
609CONFIG_CRYPTO_GHASH_S390=m 617CONFIG_CRYPTO_GHASH_S390=m
610CONFIG_ASYMMETRIC_KEY_TYPE=m 618CONFIG_ASYMMETRIC_KEY_TYPE=y
611CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m 619CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
612CONFIG_X509_CERTIFICATE_PARSER=m 620CONFIG_X509_CERTIFICATE_PARSER=m
613CONFIG_CRC7=m 621CONFIG_CRC7=m
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index 1719843a55a2..4366a3e3e754 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -1,5 +1,5 @@
1# CONFIG_SWAP is not set 1# CONFIG_SWAP is not set
2CONFIG_NO_HZ=y 2CONFIG_NO_HZ_IDLE=y
3CONFIG_HIGH_RES_TIMERS=y 3CONFIG_HIGH_RES_TIMERS=y
4CONFIG_BLK_DEV_INITRD=y 4CONFIG_BLK_DEV_INITRD=y
5CONFIG_CC_OPTIMIZE_FOR_SIZE=y 5CONFIG_CC_OPTIMIZE_FOR_SIZE=y
@@ -7,7 +7,6 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
7CONFIG_PARTITION_ADVANCED=y 7CONFIG_PARTITION_ADVANCED=y
8CONFIG_IBM_PARTITION=y 8CONFIG_IBM_PARTITION=y
9CONFIG_DEFAULT_DEADLINE=y 9CONFIG_DEFAULT_DEADLINE=y
10CONFIG_MARCH_Z196=y
11CONFIG_TUNE_ZEC12=y 10CONFIG_TUNE_ZEC12=y
12# CONFIG_COMPAT is not set 11# CONFIG_COMPAT is not set
13CONFIG_NR_CPUS=2 12CONFIG_NR_CPUS=2
@@ -64,7 +63,6 @@ CONFIG_PANIC_ON_OOPS=y
64# CONFIG_SCHED_DEBUG is not set 63# CONFIG_SCHED_DEBUG is not set
65CONFIG_RCU_CPU_STALL_TIMEOUT=60 64CONFIG_RCU_CPU_STALL_TIMEOUT=60
66# CONFIG_FTRACE is not set 65# CONFIG_FTRACE is not set
67# CONFIG_STRICT_DEVMEM is not set
68# CONFIG_PFAULT is not set 66# CONFIG_PFAULT is not set
69# CONFIG_S390_HYPFS_FS is not set 67# CONFIG_S390_HYPFS_FS is not set
70# CONFIG_VIRTUALIZATION is not set 68# CONFIG_VIRTUALIZATION is not set
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index e24f2af4c73b..3f571ea89509 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -1,8 +1,8 @@
1CONFIG_SYSVIPC=y 1CONFIG_SYSVIPC=y
2CONFIG_POSIX_MQUEUE=y 2CONFIG_POSIX_MQUEUE=y
3CONFIG_FHANDLE=y 3CONFIG_USELIB=y
4CONFIG_AUDIT=y 4CONFIG_AUDIT=y
5CONFIG_NO_HZ=y 5CONFIG_NO_HZ_IDLE=y
6CONFIG_HIGH_RES_TIMERS=y 6CONFIG_HIGH_RES_TIMERS=y
7CONFIG_TASKSTATS=y 7CONFIG_TASKSTATS=y
8CONFIG_TASK_DELAY_ACCT=y 8CONFIG_TASK_DELAY_ACCT=y
@@ -11,19 +11,19 @@ CONFIG_TASK_IO_ACCOUNTING=y
11CONFIG_IKCONFIG=y 11CONFIG_IKCONFIG=y
12CONFIG_IKCONFIG_PROC=y 12CONFIG_IKCONFIG_PROC=y
13CONFIG_CGROUPS=y 13CONFIG_CGROUPS=y
14CONFIG_CGROUP_FREEZER=y
15CONFIG_CGROUP_PIDS=y
16CONFIG_CGROUP_DEVICE=y
17CONFIG_CPUSETS=y
18CONFIG_CGROUP_CPUACCT=y
19CONFIG_MEMCG=y 14CONFIG_MEMCG=y
20CONFIG_MEMCG_SWAP=y 15CONFIG_MEMCG_SWAP=y
21CONFIG_MEMCG_KMEM=y 16CONFIG_BLK_CGROUP=y
22CONFIG_CGROUP_HUGETLB=y
23CONFIG_CGROUP_PERF=y
24CONFIG_CGROUP_SCHED=y 17CONFIG_CGROUP_SCHED=y
25CONFIG_RT_GROUP_SCHED=y 18CONFIG_RT_GROUP_SCHED=y
26CONFIG_BLK_CGROUP=y 19CONFIG_CGROUP_PIDS=y
20CONFIG_CGROUP_FREEZER=y
21CONFIG_CGROUP_HUGETLB=y
22CONFIG_CPUSETS=y
23CONFIG_CGROUP_DEVICE=y
24CONFIG_CGROUP_CPUACCT=y
25CONFIG_CGROUP_PERF=y
26CONFIG_CHECKPOINT_RESTORE=y
27CONFIG_NAMESPACES=y 27CONFIG_NAMESPACES=y
28CONFIG_USER_NS=y 28CONFIG_USER_NS=y
29CONFIG_BLK_DEV_INITRD=y 29CONFIG_BLK_DEV_INITRD=y
@@ -44,7 +44,6 @@ CONFIG_PARTITION_ADVANCED=y
44CONFIG_IBM_PARTITION=y 44CONFIG_IBM_PARTITION=y
45CONFIG_DEFAULT_DEADLINE=y 45CONFIG_DEFAULT_DEADLINE=y
46CONFIG_LIVEPATCH=y 46CONFIG_LIVEPATCH=y
47CONFIG_MARCH_Z196=y
48CONFIG_NR_CPUS=256 47CONFIG_NR_CPUS=256
49CONFIG_NUMA=y 48CONFIG_NUMA=y
50CONFIG_HZ_100=y 49CONFIG_HZ_100=y
@@ -52,6 +51,14 @@ CONFIG_MEMORY_HOTPLUG=y
52CONFIG_MEMORY_HOTREMOVE=y 51CONFIG_MEMORY_HOTREMOVE=y
53CONFIG_KSM=y 52CONFIG_KSM=y
54CONFIG_TRANSPARENT_HUGEPAGE=y 53CONFIG_TRANSPARENT_HUGEPAGE=y
54CONFIG_CLEANCACHE=y
55CONFIG_FRONTSWAP=y
56CONFIG_CMA=y
57CONFIG_ZSWAP=y
58CONFIG_ZBUD=m
59CONFIG_ZSMALLOC=m
60CONFIG_ZSMALLOC_STAT=y
61CONFIG_IDLE_PAGE_TRACKING=y
55CONFIG_CRASH_DUMP=y 62CONFIG_CRASH_DUMP=y
56CONFIG_BINFMT_MISC=m 63CONFIG_BINFMT_MISC=m
57CONFIG_HIBERNATION=y 64CONFIG_HIBERNATION=y
@@ -61,7 +68,6 @@ CONFIG_UNIX=y
61CONFIG_NET_KEY=y 68CONFIG_NET_KEY=y
62CONFIG_INET=y 69CONFIG_INET=y
63CONFIG_IP_MULTICAST=y 70CONFIG_IP_MULTICAST=y
64# CONFIG_INET_LRO is not set
65CONFIG_L2TP=m 71CONFIG_L2TP=m
66CONFIG_L2TP_DEBUGFS=m 72CONFIG_L2TP_DEBUGFS=m
67CONFIG_VLAN_8021Q=y 73CONFIG_VLAN_8021Q=y
@@ -144,6 +150,9 @@ CONFIG_TMPFS=y
144CONFIG_TMPFS_POSIX_ACL=y 150CONFIG_TMPFS_POSIX_ACL=y
145CONFIG_HUGETLBFS=y 151CONFIG_HUGETLBFS=y
146# CONFIG_NETWORK_FILESYSTEMS is not set 152# CONFIG_NETWORK_FILESYSTEMS is not set
153CONFIG_DEBUG_INFO=y
154CONFIG_DEBUG_INFO_DWARF4=y
155CONFIG_GDB_SCRIPTS=y
147CONFIG_UNUSED_SYMBOLS=y 156CONFIG_UNUSED_SYMBOLS=y
148CONFIG_DEBUG_SECTION_MISMATCH=y 157CONFIG_DEBUG_SECTION_MISMATCH=y
149CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y 158CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
@@ -158,20 +167,21 @@ CONFIG_LOCK_STAT=y
158CONFIG_DEBUG_LOCKDEP=y 167CONFIG_DEBUG_LOCKDEP=y
159CONFIG_DEBUG_ATOMIC_SLEEP=y 168CONFIG_DEBUG_ATOMIC_SLEEP=y
160CONFIG_DEBUG_LIST=y 169CONFIG_DEBUG_LIST=y
161CONFIG_DEBUG_PI_LIST=y
162CONFIG_DEBUG_SG=y 170CONFIG_DEBUG_SG=y
163CONFIG_DEBUG_NOTIFIERS=y 171CONFIG_DEBUG_NOTIFIERS=y
164CONFIG_RCU_CPU_STALL_TIMEOUT=60 172CONFIG_RCU_CPU_STALL_TIMEOUT=60
165CONFIG_RCU_TRACE=y 173CONFIG_RCU_TRACE=y
166CONFIG_LATENCYTOP=y 174CONFIG_LATENCYTOP=y
167CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y 175CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
168CONFIG_TRACER_SNAPSHOT=y 176CONFIG_SCHED_TRACER=y
177CONFIG_FTRACE_SYSCALLS=y
169CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y 178CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
170CONFIG_STACK_TRACER=y 179CONFIG_STACK_TRACER=y
171CONFIG_BLK_DEV_IO_TRACE=y 180CONFIG_BLK_DEV_IO_TRACE=y
172CONFIG_UPROBE_EVENT=y 181CONFIG_UPROBE_EVENT=y
182CONFIG_FUNCTION_PROFILER=y
183CONFIG_TRACE_ENUM_MAP_FILE=y
173CONFIG_KPROBES_SANITY_TEST=y 184CONFIG_KPROBES_SANITY_TEST=y
174# CONFIG_STRICT_DEVMEM is not set
175CONFIG_S390_PTDUMP=y 185CONFIG_S390_PTDUMP=y
176CONFIG_CRYPTO_CRYPTD=m 186CONFIG_CRYPTO_CRYPTD=m
177CONFIG_CRYPTO_AUTHENC=m 187CONFIG_CRYPTO_AUTHENC=m
@@ -212,8 +222,6 @@ CONFIG_CRYPTO_SERPENT=m
212CONFIG_CRYPTO_TEA=m 222CONFIG_CRYPTO_TEA=m
213CONFIG_CRYPTO_TWOFISH=m 223CONFIG_CRYPTO_TWOFISH=m
214CONFIG_CRYPTO_DEFLATE=m 224CONFIG_CRYPTO_DEFLATE=m
215CONFIG_CRYPTO_ZLIB=m
216CONFIG_CRYPTO_LZO=m
217CONFIG_CRYPTO_LZ4=m 225CONFIG_CRYPTO_LZ4=m
218CONFIG_CRYPTO_LZ4HC=m 226CONFIG_CRYPTO_LZ4HC=m
219CONFIG_CRYPTO_ANSI_CPRNG=m 227CONFIG_CRYPTO_ANSI_CPRNG=m
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 37b9017c6a96..ac82e8eb936d 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -245,6 +245,7 @@ struct kvm_vcpu_stat {
245 u32 exit_stop_request; 245 u32 exit_stop_request;
246 u32 exit_validity; 246 u32 exit_validity;
247 u32 exit_instruction; 247 u32 exit_instruction;
248 u32 exit_pei;
248 u32 halt_successful_poll; 249 u32 halt_successful_poll;
249 u32 halt_attempted_poll; 250 u32 halt_attempted_poll;
250 u32 halt_poll_invalid; 251 u32 halt_poll_invalid;
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 59215c518f37..7ec63b1d920d 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -649,6 +649,8 @@ static int cpumf_pmu_commit_txn(struct pmu *pmu)
649 649
650/* Performance monitoring unit for s390x */ 650/* Performance monitoring unit for s390x */
651static struct pmu cpumf_pmu = { 651static struct pmu cpumf_pmu = {
652 .task_ctx_nr = perf_sw_context,
653 .capabilities = PERF_PMU_CAP_NO_INTERRUPT,
652 .pmu_enable = cpumf_pmu_enable, 654 .pmu_enable = cpumf_pmu_enable,
653 .pmu_disable = cpumf_pmu_disable, 655 .pmu_disable = cpumf_pmu_disable,
654 .event_init = cpumf_pmu_event_init, 656 .event_init = cpumf_pmu_event_init,
@@ -708,12 +710,6 @@ static int __init cpumf_pmu_init(void)
708 goto out; 710 goto out;
709 } 711 }
710 712
711 /* The CPU measurement counter facility does not have overflow
712 * interrupts to do sampling. Sampling must be provided by
713 * external means, for example, by timers.
714 */
715 cpumf_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
716
717 cpumf_pmu.attr_groups = cpumf_cf_event_group(); 713 cpumf_pmu.attr_groups = cpumf_cf_event_group();
718 rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW); 714 rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW);
719 if (rc) { 715 if (rc) {
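
Setting .capabilities (and .task_ctx_nr) in the static initializer guarantees they are in place before perf_pmu_register() runs, instead of being patched in afterwards; PERF_PMU_CAP_NO_INTERRUPT records that this counter facility cannot drive sampling. The pattern in sketch form (the example_* names are placeholders):

    static struct pmu example_pmu = {
            .task_ctx_nr  = perf_sw_context,
            .capabilities = PERF_PMU_CAP_NO_INTERRUPT, /* counting only, no sampling */
            .pmu_enable   = example_pmu_enable,        /* callbacks as before        */
            /* ... */
    };
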
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 2e6b54e4d3f9..252157181302 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -341,6 +341,8 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
341 341
342static int handle_partial_execution(struct kvm_vcpu *vcpu) 342static int handle_partial_execution(struct kvm_vcpu *vcpu)
343{ 343{
344 vcpu->stat.exit_pei++;
345
344 if (vcpu->arch.sie_block->ipa == 0xb254) /* MVPG */ 346 if (vcpu->arch.sie_block->ipa == 0xb254) /* MVPG */
345 return handle_mvpg_pei(vcpu); 347 return handle_mvpg_pei(vcpu);
346 if (vcpu->arch.sie_block->ipa >> 8 == 0xae) /* SIGP */ 348 if (vcpu->arch.sie_block->ipa >> 8 == 0xae) /* SIGP */
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 6d8ec3ac9dd8..43f2a2b80490 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -61,6 +61,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
61 { "exit_external_request", VCPU_STAT(exit_external_request) }, 61 { "exit_external_request", VCPU_STAT(exit_external_request) },
62 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) }, 62 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
63 { "exit_instruction", VCPU_STAT(exit_instruction) }, 63 { "exit_instruction", VCPU_STAT(exit_instruction) },
64 { "exit_pei", VCPU_STAT(exit_pei) },
64 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) }, 65 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
65 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) }, 66 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
66 { "halt_successful_poll", VCPU_STAT(halt_successful_poll) }, 67 { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
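
Wiring up the new exit_pei statistic takes three coordinated edits, all visible in the hunks here: a counter field, an increment at the intercept site, and a debugfs row. In outline:

    /* 1. counter field (arch/s390/include/asm/kvm_host.h) */
    struct kvm_vcpu_stat {
            /* ... existing counters ... */
            u32 exit_pei;
    };

    /* 2. increment at the event site (arch/s390/kvm/intercept.c) */
    vcpu->stat.exit_pei++;

    /* 3. expose it via debugfs (arch/s390/kvm/kvm-s390.c) */
    { "exit_pei", VCPU_STAT(exit_pei) },
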
@@ -657,7 +658,7 @@ static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
657 kvm->arch.model.cpuid = proc->cpuid; 658 kvm->arch.model.cpuid = proc->cpuid;
658 lowest_ibc = sclp.ibc >> 16 & 0xfff; 659 lowest_ibc = sclp.ibc >> 16 & 0xfff;
659 unblocked_ibc = sclp.ibc & 0xfff; 660 unblocked_ibc = sclp.ibc & 0xfff;
660 if (lowest_ibc) { 661 if (lowest_ibc && proc->ibc) {
661 if (proc->ibc > unblocked_ibc) 662 if (proc->ibc > unblocked_ibc)
662 kvm->arch.model.ibc = unblocked_ibc; 663 kvm->arch.model.ibc = unblocked_ibc;
663 else if (proc->ibc < lowest_ibc) 664 else if (proc->ibc < lowest_ibc)
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 7a3144017301..19288c1b36d3 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -250,6 +250,7 @@ static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
250 250
251 report_user_fault(regs, SIGSEGV, 1); 251 report_user_fault(regs, SIGSEGV, 1);
252 si.si_signo = SIGSEGV; 252 si.si_signo = SIGSEGV;
253 si.si_errno = 0;
253 si.si_code = si_code; 254 si.si_code = si_code;
254 si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK); 255 si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
255 force_sig_info(SIGSEGV, &si, current); 256 force_sig_info(SIGSEGV, &si, current);
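
siginfo is copied out to user space by force_sig_info(), so any field left uninitialized leaks kernel stack bytes; hence the explicit si_errno = 0. A common, slightly heavier alternative, as a sketch:

    siginfo_t si;

    memset(&si, 0, sizeof(si));     /* nothing uninitialized can reach user space */
    si.si_signo = SIGSEGV;
    si.si_code  = si_code;
    si.si_addr  = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
    force_sig_info(SIGSEGV, &si, current);
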
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index e8b5962ac12a..e2565d2d0c32 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -169,7 +169,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
169 return table; 169 return table;
170 } 170 }
171 /* Allocate a fresh page */ 171 /* Allocate a fresh page */
172 page = alloc_page(GFP_KERNEL|__GFP_REPEAT); 172 page = alloc_page(GFP_KERNEL);
173 if (!page) 173 if (!page)
174 return NULL; 174 return NULL;
175 if (!pgtable_page_ctor(page)) { 175 if (!pgtable_page_ctor(page)) {
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 4324b87f9398..9f0ce0e6eeb4 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -437,7 +437,7 @@ void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
437 pgste = pgste_get_lock(ptep); 437 pgste = pgste_get_lock(ptep);
438 pgstev = pgste_val(pgste); 438 pgstev = pgste_val(pgste);
439 pte = *ptep; 439 pte = *ptep;
440 if (pte_swap(pte) && 440 if (!reset && pte_swap(pte) &&
441 ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED || 441 ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
442 (pgstev & _PGSTE_GPS_ZERO))) { 442 (pgstev & _PGSTE_GPS_ZERO))) {
443 ptep_zap_swap_entry(mm, pte_to_swp_entry(pte)); 443 ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
diff --git a/arch/s390/net/bpf_jit.h b/arch/s390/net/bpf_jit.h
index f010c93a88b1..fda605dbc1b4 100644
--- a/arch/s390/net/bpf_jit.h
+++ b/arch/s390/net/bpf_jit.h
@@ -37,7 +37,7 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
37 * | | | 37 * | | |
38 * +---------------+ | 38 * +---------------+ |
39 * | 8 byte skbp | | 39 * | 8 byte skbp | |
40 * R15+170 -> +---------------+ | 40 * R15+176 -> +---------------+ |
41 * | 8 byte hlen | | 41 * | 8 byte hlen | |
42 * R15+168 -> +---------------+ | 42 * R15+168 -> +---------------+ |
43 * | 4 byte align | | 43 * | 4 byte align | |
@@ -58,7 +58,7 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
58#define STK_OFF (STK_SPACE - STK_160_UNUSED) 58#define STK_OFF (STK_SPACE - STK_160_UNUSED)
59#define STK_OFF_TMP 160 /* Offset of tmp buffer on stack */ 59#define STK_OFF_TMP 160 /* Offset of tmp buffer on stack */
60#define STK_OFF_HLEN 168 /* Offset of SKB header length on stack */ 60#define STK_OFF_HLEN 168 /* Offset of SKB header length on stack */
61#define STK_OFF_SKBP 170 /* Offset of SKB pointer on stack */ 61#define STK_OFF_SKBP 176 /* Offset of SKB pointer on stack */
62 62
63#define STK_OFF_R6 (160 - 11 * 8) /* Offset of r6 on stack */ 63#define STK_OFF_R6 (160 - 11 * 8) /* Offset of r6 on stack */
64#define STK_OFF_TCCNT (160 - 12 * 8) /* Offset of tail_call_cnt on stack */ 64#define STK_OFF_TCCNT (160 - 12 * 8) /* Offset of tail_call_cnt on stack */
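
Per the diagram, hlen occupies the 8 bytes at R15+168, so the old STK_OFF_SKBP of 170 stored the 8-byte skb pointer on top of it; 176 is the first free slot past hlen and is naturally 8-byte aligned. The arithmetic, as compile-time checks:

    #define STK_OFF_HLEN 168
    #define STK_OFF_SKBP 176

    _Static_assert(STK_OFF_SKBP >= STK_OFF_HLEN + 8,
                   "skb pointer must not overlap hlen");  /* old 170 failed this */
    _Static_assert(STK_OFF_SKBP % 8 == 0,
                   "stg wants an 8-byte aligned slot");   /* 176 = 22 * 8        */
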
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 9133b0ec000b..bee281f3163d 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -45,7 +45,7 @@ struct bpf_jit {
45 int labels[1]; /* Labels for local jumps */ 45 int labels[1]; /* Labels for local jumps */
46}; 46};
47 47
48#define BPF_SIZE_MAX 0x7ffff /* Max size for program (20 bit signed displ) */ 48#define BPF_SIZE_MAX 0xffff /* Max size for program (16 bit branches) */
49 49
50#define SEEN_SKB 1 /* skb access */ 50#define SEEN_SKB 1 /* skb access */
51#define SEEN_MEM 2 /* use mem[] for temporary storage */ 51#define SEEN_MEM 2 /* use mem[] for temporary storage */
@@ -450,7 +450,7 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
450 emit_load_skb_data_hlen(jit); 450 emit_load_skb_data_hlen(jit);
451 if (jit->seen & SEEN_SKB_CHANGE) 451 if (jit->seen & SEEN_SKB_CHANGE)
452 /* stg %b1,ST_OFF_SKBP(%r0,%r15) */ 452 /* stg %b1,ST_OFF_SKBP(%r0,%r15) */
453 EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15, 453 EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_1, REG_0, REG_15,
454 STK_OFF_SKBP); 454 STK_OFF_SKBP);
455} 455}
456 456
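
The JIT's local branches are 16-bit relative instructions, which encode a signed halfword count; the old 0x7ffff cap matched only the 20-bit displacement of long-displacement loads and stores, not the branches. Worked range arithmetic:

    /* signed 16-bit halfword displacement:
     *   forward reach:  32767 * 2 = 65534 bytes
     *   backward reach: 32768 * 2 = 65536 bytes
     * so a 0xffff-byte image keeps every local branch encodable.
     */
    #define BPF_SIZE_MAX 0xffff     /* bytes of generated code */
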
diff --git a/arch/score/include/asm/pgalloc.h b/arch/score/include/asm/pgalloc.h
index 2e067657db98..49b012d78c1a 100644
--- a/arch/score/include/asm/pgalloc.h
+++ b/arch/score/include/asm/pgalloc.h
@@ -42,8 +42,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
42{ 42{
43 pte_t *pte; 43 pte_t *pte;
44 44
45 pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 45 pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, PTE_ORDER);
46 PTE_ORDER);
47 46
48 return pte; 47 return pte;
49} 48}
@@ -53,7 +52,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
53{ 52{
54 struct page *pte; 53 struct page *pte;
55 54
56 pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER); 55 pte = alloc_pages(GFP_KERNEL, PTE_ORDER);
57 if (!pte) 56 if (!pte)
58 return NULL; 57 return NULL;
59 clear_highpage(pte); 58 clear_highpage(pte);
diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h
index a33673b3687d..f3f42c84c40f 100644
--- a/arch/sh/include/asm/pgalloc.h
+++ b/arch/sh/include/asm/pgalloc.h
@@ -34,7 +34,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
34static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 34static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
35 unsigned long address) 35 unsigned long address)
36{ 36{
37 return quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL); 37 return quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
38} 38}
39 39
40static inline pgtable_t pte_alloc_one(struct mm_struct *mm, 40static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
@@ -43,7 +43,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
43 struct page *page; 43 struct page *page;
44 void *pg; 44 void *pg;
45 45
46 pg = quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL); 46 pg = quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
47 if (!pg) 47 if (!pg)
48 return NULL; 48 return NULL;
49 page = virt_to_page(pg); 49 page = virt_to_page(pg);
diff --git a/arch/sh/mm/pgtable.c b/arch/sh/mm/pgtable.c
index 26e03a1f7ca4..a62bd8696779 100644
--- a/arch/sh/mm/pgtable.c
+++ b/arch/sh/mm/pgtable.c
@@ -1,7 +1,7 @@
1#include <linux/mm.h> 1#include <linux/mm.h>
2#include <linux/slab.h> 2#include <linux/slab.h>
3 3
4#define PGALLOC_GFP GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO 4#define PGALLOC_GFP GFP_KERNEL | __GFP_ZERO
5 5
6static struct kmem_cache *pgd_cachep; 6static struct kmem_cache *pgd_cachep;
7#if PAGETABLE_LEVELS > 2 7#if PAGETABLE_LEVELS > 2
diff --git a/arch/sparc/include/asm/head_64.h b/arch/sparc/include/asm/head_64.h
index 10e9dabc4c41..f0700cfeedd7 100644
--- a/arch/sparc/include/asm/head_64.h
+++ b/arch/sparc/include/asm/head_64.h
@@ -15,6 +15,10 @@
15 15
16#define PTREGS_OFF (STACK_BIAS + STACKFRAME_SZ) 16#define PTREGS_OFF (STACK_BIAS + STACKFRAME_SZ)
17 17
18#define RTRAP_PSTATE (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
19#define RTRAP_PSTATE_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
20#define RTRAP_PSTATE_AG_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
21
18#define __CHEETAH_ID 0x003e0014 22#define __CHEETAH_ID 0x003e0014
19#define __JALAPENO_ID 0x003e0016 23#define __JALAPENO_ID 0x003e0016
20#define __SERRANO_ID 0x003e0022 24#define __SERRANO_ID 0x003e0022
diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
index 5e3187185b4a..3529f1378cd8 100644
--- a/arch/sparc/include/asm/pgalloc_64.h
+++ b/arch/sparc/include/asm/pgalloc_64.h
@@ -41,8 +41,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
41 41
42static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) 42static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
43{ 43{
44 return kmem_cache_alloc(pgtable_cache, 44 return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
45 GFP_KERNEL|__GFP_REPEAT);
46} 45}
47 46
48static inline void pud_free(struct mm_struct *mm, pud_t *pud) 47static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -52,8 +51,7 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
52 51
53static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) 52static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
54{ 53{
55 return kmem_cache_alloc(pgtable_cache, 54 return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
56 GFP_KERNEL|__GFP_REPEAT);
57} 55}
58 56
59static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) 57static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
diff --git a/arch/sparc/include/asm/ttable.h b/arch/sparc/include/asm/ttable.h
index 71b5a67522ab..781b9f1dbdc2 100644
--- a/arch/sparc/include/asm/ttable.h
+++ b/arch/sparc/include/asm/ttable.h
@@ -589,8 +589,8 @@ user_rtt_fill_64bit: \
589 restored; \ 589 restored; \
590 nop; nop; nop; nop; nop; nop; \ 590 nop; nop; nop; nop; nop; nop; \
591 nop; nop; nop; nop; nop; \ 591 nop; nop; nop; nop; nop; \
592 ba,a,pt %xcc, user_rtt_fill_fixup; \ 592 ba,a,pt %xcc, user_rtt_fill_fixup_dax; \
593 ba,a,pt %xcc, user_rtt_fill_fixup; \ 593 ba,a,pt %xcc, user_rtt_fill_fixup_mna; \
594 ba,a,pt %xcc, user_rtt_fill_fixup; 594 ba,a,pt %xcc, user_rtt_fill_fixup;
595 595
596 596
@@ -652,8 +652,8 @@ user_rtt_fill_32bit: \
652 restored; \ 652 restored; \
653 nop; nop; nop; nop; nop; \ 653 nop; nop; nop; nop; nop; \
654 nop; nop; nop; \ 654 nop; nop; nop; \
655 ba,a,pt %xcc, user_rtt_fill_fixup; \ 655 ba,a,pt %xcc, user_rtt_fill_fixup_dax; \
656 ba,a,pt %xcc, user_rtt_fill_fixup; \ 656 ba,a,pt %xcc, user_rtt_fill_fixup_mna; \
657 ba,a,pt %xcc, user_rtt_fill_fixup; 657 ba,a,pt %xcc, user_rtt_fill_fixup;
658 658
659 659
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 7cf9c6ea3f1f..fdb13327fded 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -21,6 +21,7 @@ CFLAGS_REMOVE_perf_event.o := -pg
21CFLAGS_REMOVE_pcr.o := -pg 21CFLAGS_REMOVE_pcr.o := -pg
22endif 22endif
23 23
24obj-$(CONFIG_SPARC64) += urtt_fill.o
24obj-$(CONFIG_SPARC32) += entry.o wof.o wuf.o 25obj-$(CONFIG_SPARC32) += entry.o wof.o wuf.o
25obj-$(CONFIG_SPARC32) += etrap_32.o 26obj-$(CONFIG_SPARC32) += etrap_32.o
26obj-$(CONFIG_SPARC32) += rtrap_32.o 27obj-$(CONFIG_SPARC32) += rtrap_32.o
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index d08bdaffdbfc..216948ca4382 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -14,10 +14,6 @@
14#include <asm/visasm.h> 14#include <asm/visasm.h>
15#include <asm/processor.h> 15#include <asm/processor.h>
16 16
17#define RTRAP_PSTATE (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
18#define RTRAP_PSTATE_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
19#define RTRAP_PSTATE_AG_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
20
21#ifdef CONFIG_CONTEXT_TRACKING 17#ifdef CONFIG_CONTEXT_TRACKING
22# define SCHEDULE_USER schedule_user 18# define SCHEDULE_USER schedule_user
23#else 19#else
@@ -242,52 +238,17 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
242 wrpr %g1, %cwp 238 wrpr %g1, %cwp
243 ba,a,pt %xcc, user_rtt_fill_64bit 239 ba,a,pt %xcc, user_rtt_fill_64bit
244 240
245user_rtt_fill_fixup: 241user_rtt_fill_fixup_dax:
246 rdpr %cwp, %g1 242 ba,pt %xcc, user_rtt_fill_fixup_common
247 add %g1, 1, %g1 243 mov 1, %g3
248 wrpr %g1, 0x0, %cwp
249
250 rdpr %wstate, %g2
251 sll %g2, 3, %g2
252 wrpr %g2, 0x0, %wstate
253
254 /* We know %canrestore and %otherwin are both zero. */
255
256 sethi %hi(sparc64_kern_pri_context), %g2
257 ldx [%g2 + %lo(sparc64_kern_pri_context)], %g2
258 mov PRIMARY_CONTEXT, %g1
259
260661: stxa %g2, [%g1] ASI_DMMU
261 .section .sun4v_1insn_patch, "ax"
262 .word 661b
263 stxa %g2, [%g1] ASI_MMU
264 .previous
265
266 sethi %hi(KERNBASE), %g1
267 flush %g1
268 244
269 or %g4, FAULT_CODE_WINFIXUP, %g4 245user_rtt_fill_fixup_mna:
270 stb %g4, [%g6 + TI_FAULT_CODE] 246 ba,pt %xcc, user_rtt_fill_fixup_common
271 stx %g5, [%g6 + TI_FAULT_ADDR] 247 mov 2, %g3
272 248
273 mov %g6, %l1 249user_rtt_fill_fixup:
274 wrpr %g0, 0x0, %tl 250 ba,pt %xcc, user_rtt_fill_fixup_common
275 251 clr %g3
276661: nop
277 .section .sun4v_1insn_patch, "ax"
278 .word 661b
279 SET_GL(0)
280 .previous
281
282 wrpr %g0, RTRAP_PSTATE, %pstate
283
284 mov %l1, %g6
285 ldx [%g6 + TI_TASK], %g4
286 LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
287 call do_sparc64_fault
288 add %sp, PTREGS_OFF, %o0
289 ba,pt %xcc, rtrap
290 nop
291 252
292user_rtt_pre_restore: 253user_rtt_pre_restore:
293 add %g1, 1, %g1 254 add %g1, 1, %g1
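
Each fixup entry point now only loads a selector into %g3 (note the delay-slot trick: the mov/clr executes before the branch is taken) and jumps to user_rtt_fill_fixup_common in the newly added urtt_fill.o. A C-level sketch of the dispatch the selector implies; the per-case routing is an assumption, since urtt_fill.S is not part of this section:

    switch (g3) {           /* g3: value the fixup stub loaded into %g3 */
    case 0: /* plain window-fill fault: normal fault handling            */
            break;
    case 1: /* DAX: assumed to route to the data-access-exception handler */
            break;
    case 2: /* MNA: assumed to route to the unaligned-access handler      */
            break;
    }
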
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index 3c25241fa5cb..91cc2f4ae4d9 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -138,12 +138,24 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
 	return 0;
 }
 
+/* Checks if the fp is valid. We always build signal frames which are
+ * 16-byte aligned, therefore we can always enforce that the restore
+ * frame has that property as well.
+ */
+static bool invalid_frame_pointer(void __user *fp, int fplen)
+{
+	if ((((unsigned long) fp) & 15) ||
+	    ((unsigned long)fp) > 0x100000000ULL - fplen)
+		return true;
+	return false;
+}
+
 void do_sigreturn32(struct pt_regs *regs)
 {
 	struct signal_frame32 __user *sf;
 	compat_uptr_t fpu_save;
 	compat_uptr_t rwin_save;
-	unsigned int psr;
+	unsigned int psr, ufp;
 	unsigned int pc, npc;
 	sigset_t set;
 	compat_sigset_t seta;
@@ -158,11 +170,16 @@ void do_sigreturn32(struct pt_regs *regs)
 	sf = (struct signal_frame32 __user *) regs->u_regs[UREG_FP];
 
 	/* 1. Make sure we are not getting garbage from the user */
-	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
-	    (((unsigned long) sf) & 3))
+	if (invalid_frame_pointer(sf, sizeof(*sf)))
+		goto segv;
+
+	if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
+		goto segv;
+
+	if (ufp & 0x7)
 		goto segv;
 
-	if (get_user(pc, &sf->info.si_regs.pc) ||
+	if (__get_user(pc, &sf->info.si_regs.pc) ||
 	    __get_user(npc, &sf->info.si_regs.npc))
 		goto segv;
 
@@ -227,7 +244,7 @@ segv:
 asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
 {
 	struct rt_signal_frame32 __user *sf;
-	unsigned int psr, pc, npc;
+	unsigned int psr, pc, npc, ufp;
 	compat_uptr_t fpu_save;
 	compat_uptr_t rwin_save;
 	sigset_t set;
@@ -242,11 +259,16 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
 	sf = (struct rt_signal_frame32 __user *) regs->u_regs[UREG_FP];
 
 	/* 1. Make sure we are not getting garbage from the user */
-	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
-	    (((unsigned long) sf) & 3))
+	if (invalid_frame_pointer(sf, sizeof(*sf)))
+		goto segv;
+
+	if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
+		goto segv;
+
+	if (ufp & 0x7)
 		goto segv;
 
-	if (get_user(pc, &sf->regs.pc) ||
+	if (__get_user(pc, &sf->regs.pc) ||
 	    __get_user(npc, &sf->regs.npc))
 		goto segv;
 
@@ -307,14 +329,6 @@ segv:
 	force_sig(SIGSEGV, current);
 }
 
-/* Checks if the fp is valid */
-static int invalid_frame_pointer(void __user *fp, int fplen)
-{
-	if ((((unsigned long) fp) & 7) || ((unsigned long)fp) > 0x100000000ULL - fplen)
-		return 1;
-	return 0;
-}
-
 static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
 {
 	unsigned long sp;
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index 52aa5e4ce5e7..c3c12efe0bc0 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -60,10 +60,22 @@ struct rt_signal_frame {
 #define SF_ALIGNEDSZ  (((sizeof(struct signal_frame) + 7) & (~7)))
 #define RT_ALIGNEDSZ  (((sizeof(struct rt_signal_frame) + 7) & (~7)))
 
+/* Checks if the fp is valid. We always build signal frames which are
+ * 16-byte aligned, therefore we can always enforce that the restore
+ * frame has that property as well.
+ */
+static inline bool invalid_frame_pointer(void __user *fp, int fplen)
+{
+	if ((((unsigned long) fp) & 15) || !__access_ok((unsigned long)fp, fplen))
+		return true;
+
+	return false;
+}
+
 asmlinkage void do_sigreturn(struct pt_regs *regs)
 {
+	unsigned long up_psr, pc, npc, ufp;
 	struct signal_frame __user *sf;
-	unsigned long up_psr, pc, npc;
 	sigset_t set;
 	__siginfo_fpu_t __user *fpu_save;
 	__siginfo_rwin_t __user *rwin_save;
@@ -77,10 +89,13 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
 	sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];
 
 	/* 1. Make sure we are not getting garbage from the user */
-	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
+	if (!invalid_frame_pointer(sf, sizeof(*sf)))
+		goto segv_and_exit;
+
+	if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
 		goto segv_and_exit;
 
-	if (((unsigned long) sf) & 3)
+	if (ufp & 0x7)
 		goto segv_and_exit;
 
 	err = __get_user(pc, &sf->info.si_regs.pc);
@@ -127,7 +142,7 @@ segv_and_exit:
 asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
 {
 	struct rt_signal_frame __user *sf;
-	unsigned int psr, pc, npc;
+	unsigned int psr, pc, npc, ufp;
 	__siginfo_fpu_t __user *fpu_save;
 	__siginfo_rwin_t __user *rwin_save;
 	sigset_t set;
@@ -135,8 +150,13 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
 
 	synchronize_user_stack();
 	sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
-	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
-	    (((unsigned long) sf) & 0x03))
+	if (!invalid_frame_pointer(sf, sizeof(*sf)))
+		goto segv;
+
+	if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
+		goto segv;
+
+	if (ufp & 0x7)
 		goto segv;
 
 	err = __get_user(pc, &sf->regs.pc);
@@ -178,15 +198,6 @@ segv:
 	force_sig(SIGSEGV, current);
 }
 
-/* Checks if the fp is valid */
-static inline int invalid_frame_pointer(void __user *fp, int fplen)
-{
-	if ((((unsigned long) fp) & 7) || !__access_ok((unsigned long)fp, fplen))
-		return 1;
-
-	return 0;
-}
-
 static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
 {
 	unsigned long sp = regs->u_regs[UREG_FP];
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index 39aaec173f66..5ee930c48f4c 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -234,6 +234,17 @@ do_sigsegv:
 	goto out;
 }
 
+/* Checks if the fp is valid. We always build rt signal frames which
+ * are 16-byte aligned, therefore we can always enforce that the
+ * restore frame has that property as well.
+ */
+static bool invalid_frame_pointer(void __user *fp)
+{
+	if (((unsigned long) fp) & 15)
+		return true;
+	return false;
+}
+
 struct rt_signal_frame {
 	struct sparc_stackf	ss;
 	siginfo_t		info;
@@ -246,8 +257,8 @@ struct rt_signal_frame {
 
 void do_rt_sigreturn(struct pt_regs *regs)
 {
+	unsigned long tpc, tnpc, tstate, ufp;
 	struct rt_signal_frame __user *sf;
-	unsigned long tpc, tnpc, tstate;
 	__siginfo_fpu_t __user *fpu_save;
 	__siginfo_rwin_t __user *rwin_save;
 	sigset_t set;
@@ -261,10 +272,16 @@ void do_rt_sigreturn(struct pt_regs *regs)
 		(regs->u_regs [UREG_FP] + STACK_BIAS);
 
 	/* 1. Make sure we are not getting garbage from the user */
-	if (((unsigned long) sf) & 3)
+	if (invalid_frame_pointer(sf))
+		goto segv;
+
+	if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
 		goto segv;
 
-	err = get_user(tpc, &sf->regs.tpc);
+	if ((ufp + STACK_BIAS) & 0x7)
+		goto segv;
+
+	err = __get_user(tpc, &sf->regs.tpc);
 	err |= __get_user(tnpc, &sf->regs.tnpc);
 	if (test_thread_flag(TIF_32BIT)) {
 		tpc &= 0xffffffff;
@@ -308,14 +325,6 @@ segv:
 	force_sig(SIGSEGV, current);
 }
 
-/* Checks if the fp is valid */
-static int invalid_frame_pointer(void __user *fp)
-{
-	if (((unsigned long) fp) & 15)
-		return 1;
-	return 0;
-}
-
 static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
 {
 	unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS;
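The four sigreturn paths patched above all converge on the same validation shape: reject a user-supplied frame pointer unless it is 16-byte aligned (signal frames are always built that way) and the whole frame lies in an addressable range, and only then start reading fields out of it. A minimal stand-alone C sketch of that predicate; COMPAT_4G_LIMIT is an illustrative name for the compat-task cutoff, not a kernel symbol:

    #include <stdbool.h>
    #include <stdint.h>

    #define COMPAT_4G_LIMIT 0x100000000ULL  /* compat tasks live below 4GB */

    /* Returns true when the untrusted frame pointer must be rejected. */
    static bool frame_pointer_invalid(uint64_t fp, unsigned int fplen)
    {
            if (fp & 15)                       /* frames are 16-byte aligned */
                    return true;
            if (fp > COMPAT_4G_LIMIT - fplen)  /* frame must fit below 4GB */
                    return true;
            return false;
    }
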
diff --git a/arch/sparc/kernel/sigutil_32.c b/arch/sparc/kernel/sigutil_32.c
index 0f6eebe71e6c..e5fe8cef9a69 100644
--- a/arch/sparc/kernel/sigutil_32.c
+++ b/arch/sparc/kernel/sigutil_32.c
@@ -48,6 +48,10 @@ int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
 int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
 {
 	int err;
+
+	if (((unsigned long) fpu) & 3)
+		return -EFAULT;
+
 #ifdef CONFIG_SMP
 	if (test_tsk_thread_flag(current, TIF_USEDFPU))
 		regs->psr &= ~PSR_EF;
@@ -97,7 +101,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp)
 	struct thread_info *t = current_thread_info();
 	int i, wsaved, err;
 
-	__get_user(wsaved, &rp->wsaved);
+	if (((unsigned long) rp) & 3)
+		return -EFAULT;
+
+	get_user(wsaved, &rp->wsaved);
 	if (wsaved > NSWINS)
 		return -EFAULT;
 
diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c
index 387834a9c56a..36aadcbeac69 100644
--- a/arch/sparc/kernel/sigutil_64.c
+++ b/arch/sparc/kernel/sigutil_64.c
@@ -37,7 +37,10 @@ int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
 	unsigned long fprs;
 	int err;
 
-	err = __get_user(fprs, &fpu->si_fprs);
+	if (((unsigned long) fpu) & 7)
+		return -EFAULT;
+
+	err = get_user(fprs, &fpu->si_fprs);
 	fprs_write(0);
 	regs->tstate &= ~TSTATE_PEF;
 	if (fprs & FPRS_DL)
@@ -72,7 +75,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp)
 	struct thread_info *t = current_thread_info();
 	int i, wsaved, err;
 
-	__get_user(wsaved, &rp->wsaved);
+	if (((unsigned long) rp) & 7)
+		return -EFAULT;
+
+	get_user(wsaved, &rp->wsaved);
 	if (wsaved > NSWINS)
 		return -EFAULT;
 
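Note how the sigutil fixes also promote the first access from __get_user() to get_user(): __get_user() skips the access_ok() range check, so it is only safe after an earlier checked access to the same object. A stand-alone sketch of that rule, with checked_get()/raw_get() as illustrative stand-ins for the kernel accessors:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* checked_get() models get_user() (validates the range first);
     * raw_get() models __get_user() (assumes it was already validated). */
    static bool range_ok(const void *p, size_t len)
    {
            return p != NULL && len <= 64;      /* illustrative policy only */
    }

    static bool checked_get(uint32_t *dst, const uint32_t *src)
    {
            if (!range_ok(src, sizeof(*src)))
                    return false;
            memcpy(dst, src, sizeof(*src));
            return true;
    }

    static void raw_get(uint32_t *dst, const uint32_t *src)
    {
            memcpy(dst, src, sizeof(*src));     /* no check: caller's job */
    }

    /* The enforced pattern: align-check the pointer, make the first load
     * the checked one, then raw loads of nearby fields are acceptable. */
    static bool read_two_fields(const uint32_t *obj, uint32_t *a, uint32_t *b)
    {
            if (((uintptr_t)obj) & 3)           /* reject misaligned pointers */
                    return false;
            if (!checked_get(a, &obj[0]))       /* first access: checked */
                    return false;
            raw_get(b, &obj[1]);                /* same object: may skip check */
            return true;
    }
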
diff --git a/arch/sparc/kernel/urtt_fill.S b/arch/sparc/kernel/urtt_fill.S
new file mode 100644
index 000000000000..5604a2b051d4
--- /dev/null
+++ b/arch/sparc/kernel/urtt_fill.S
@@ -0,0 +1,98 @@
+#include <asm/thread_info.h>
+#include <asm/trap_block.h>
+#include <asm/spitfire.h>
+#include <asm/ptrace.h>
+#include <asm/head.h>
+
+	.text
+	.align	8
+	.globl	user_rtt_fill_fixup_common
+user_rtt_fill_fixup_common:
+	rdpr	%cwp, %g1
+	add	%g1, 1, %g1
+	wrpr	%g1, 0x0, %cwp
+
+	rdpr	%wstate, %g2
+	sll	%g2, 3, %g2
+	wrpr	%g2, 0x0, %wstate
+
+	/* We know %canrestore and %otherwin are both zero. */
+
+	sethi	%hi(sparc64_kern_pri_context), %g2
+	ldx	[%g2 + %lo(sparc64_kern_pri_context)], %g2
+	mov	PRIMARY_CONTEXT, %g1
+
+661:	stxa	%g2, [%g1] ASI_DMMU
+	.section .sun4v_1insn_patch, "ax"
+	.word	661b
+	stxa	%g2, [%g1] ASI_MMU
+	.previous
+
+	sethi	%hi(KERNBASE), %g1
+	flush	%g1
+
+	mov	%g4, %l4
+	mov	%g5, %l5
+	brnz,pn	%g3, 1f
+	 mov	%g3, %l3
+
+	or	%g4, FAULT_CODE_WINFIXUP, %g4
+	stb	%g4, [%g6 + TI_FAULT_CODE]
+	stx	%g5, [%g6 + TI_FAULT_ADDR]
+1:
+	mov	%g6, %l1
+	wrpr	%g0, 0x0, %tl
+
+661:	nop
+	.section .sun4v_1insn_patch, "ax"
+	.word	661b
+	SET_GL(0)
+	.previous
+
+	wrpr	%g0, RTRAP_PSTATE, %pstate
+
+	mov	%l1, %g6
+	ldx	[%g6 + TI_TASK], %g4
+	LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
+
+	brnz,pn	%l3, 1f
+	 nop
+
+	call	do_sparc64_fault
+	 add	%sp, PTREGS_OFF, %o0
+	ba,pt	%xcc, rtrap
+	 nop
+
+1:	cmp	%g3, 2
+	bne,pn	%xcc, 2f
+	 nop
+
+	sethi	%hi(tlb_type), %g1
+	lduw	[%g1 + %lo(tlb_type)], %g1
+	cmp	%g1, 3
+	bne,pt	%icc, 1f
+	 add	%sp, PTREGS_OFF, %o0
+	mov	%l4, %o2
+	call	sun4v_do_mna
+	 mov	%l5, %o1
+	ba,a,pt	%xcc, rtrap
+1:	mov	%l4, %o1
+	mov	%l5, %o2
+	call	mem_address_unaligned
+	 nop
+	ba,a,pt	%xcc, rtrap
+
+2:	sethi	%hi(tlb_type), %g1
+	mov	%l4, %o1
+	lduw	[%g1 + %lo(tlb_type)], %g1
+	mov	%l5, %o2
+	cmp	%g1, 3
+	bne,pt	%icc, 1f
+	 add	%sp, PTREGS_OFF, %o0
+	call	sun4v_data_access_exception
+	 nop
+	ba,a,pt	%xcc, rtrap
+
+1:	call	spitfire_data_access_exception
+	 nop
+	ba,a,pt	%xcc, rtrap
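The new common routine receives a selector in %g3 set by the three entry stubs in rtrap_64.S (0 from user_rtt_fill_fixup, 1 from the _dax stub, 2 from the _mna stub) and branches to the matching fault handler at the end. As a sketch only, the control flow in C would read roughly as below; the handler names match the calls in the assembly, everything else is illustrative:

    enum urtt_fixup { URTT_WINFIXUP = 0, URTT_DAX = 1, URTT_MNA = 2 };

    /* C rendering of the %g3 dispatch in user_rtt_fill_fixup_common. */
    static void urtt_fixup_dispatch(enum urtt_fixup kind, int is_sun4v)
    {
            (void)is_sun4v;  /* selects the sun4v vs. spitfire variants */
            switch (kind) {
            case URTT_WINFIXUP:
                    /* record FAULT_CODE_WINFIXUP, then do_sparc64_fault() */
                    break;
            case URTT_MNA:
                    /* sun4v_do_mna() on sun4v, mem_address_unaligned() else */
                    break;
            case URTT_DAX:
                    /* sun4v_data_access_exception() on sun4v,
                     * spitfire_data_access_exception() otherwise */
                    break;
            }
    }
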
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 652683cb4b4b..aec508e37490 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2704,8 +2704,7 @@ void __flush_tlb_all(void)
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 			    unsigned long address)
 {
-	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
-				       __GFP_REPEAT | __GFP_ZERO);
+	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
 	pte_t *pte = NULL;
 
 	if (page)
@@ -2717,8 +2716,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 pgtable_t pte_alloc_one(struct mm_struct *mm,
 			unsigned long address)
 {
-	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
-				       __GFP_REPEAT | __GFP_ZERO);
+	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
 	if (!page)
 		return NULL;
 	if (!pgtable_page_ctor(page)) {
@@ -2824,9 +2822,10 @@ void hugetlb_setup(struct pt_regs *regs)
 	 * the Data-TLB for huge pages.
 	 */
 	if (tlb_type == cheetah_plus) {
+		bool need_context_reload = false;
 		unsigned long ctx;
 
-		spin_lock(&ctx_alloc_lock);
+		spin_lock_irq(&ctx_alloc_lock);
 		ctx = mm->context.sparc64_ctx_val;
 		ctx &= ~CTX_PGSZ_MASK;
 		ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
@@ -2845,9 +2844,12 @@ void hugetlb_setup(struct pt_regs *regs)
 			 * also executing in this address space.
 			 */
 			mm->context.sparc64_ctx_val = ctx;
-			on_each_cpu(context_reload, mm, 0);
+			need_context_reload = true;
 		}
-		spin_unlock(&ctx_alloc_lock);
+		spin_unlock_irq(&ctx_alloc_lock);
+
+		if (need_context_reload)
+			on_each_cpu(context_reload, mm, 0);
 	}
 }
 #endif
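The hugetlb_setup() change is the classic "decide under the lock, act after it" pattern: on_each_cpu() waits for every other CPU, so calling it while ctx_alloc_lock is held with IRQs off can deadlock against CPUs spinning on that same lock. A user-space sketch of the same shape, with a pthread mutex standing in for the spinlock and illustrative names throughout:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long ctx_val;

    static void broadcast_reload(void) { /* models on_each_cpu(...) */ }

    static void update_context(unsigned long new_ctx, bool demap_needed)
    {
            bool need_reload = false;

            pthread_mutex_lock(&ctx_lock);
            if (demap_needed) {
                    ctx_val = new_ctx;
                    need_reload = true;   /* only record the decision here */
            }
            pthread_mutex_unlock(&ctx_lock);

            if (need_reload)              /* act after dropping the lock */
                    broadcast_reload();
    }
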
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
index 4b7cef9e94e0..c1467ac59ce6 100644
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -78,7 +78,7 @@ struct thread_info {
 
 #ifndef __ASSEMBLY__
 
-void arch_release_thread_info(struct thread_info *info);
+void arch_release_thread_stack(unsigned long *stack);
 
 /* How to get the thread information struct from C. */
 register unsigned long stack_pointer __asm__("sp");
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 6b705ccc9cc1..a465d8372edd 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -73,8 +73,9 @@ void arch_cpu_idle(void)
 /*
  * Release a thread_info structure
  */
-void arch_release_thread_info(struct thread_info *info)
+void arch_release_thread_stack(unsigned long *stack)
 {
+	struct thread_info *info = (void *)stack;
 	struct single_step_state *step_state = info->step_state;
 
 	if (step_state) {
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 7bf2491a9c1f..c4d5bf841a7f 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -231,7 +231,7 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
 			       int order)
 {
-	gfp_t flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO;
+	gfp_t flags = GFP_KERNEL|__GFP_ZERO;
 	struct page *p;
 	int i;
 
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index b2a2dff50b4e..e7437ec62710 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -204,7 +204,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
 	pte_t *pte;
 
-	pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+	pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
 	return pte;
 }
 
@@ -212,7 +212,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
 	struct page *pte;
 
-	pte = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+	pte = alloc_page(GFP_KERNEL|__GFP_ZERO);
 	if (!pte)
 		return NULL;
 	if (!pgtable_page_ctor(pte)) {
diff --git a/arch/unicore32/include/asm/pgalloc.h b/arch/unicore32/include/asm/pgalloc.h
index 2e02d1356fdf..26775793c204 100644
--- a/arch/unicore32/include/asm/pgalloc.h
+++ b/arch/unicore32/include/asm/pgalloc.h
@@ -28,7 +28,7 @@ extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
 #define pgd_alloc(mm)			get_pgd_slow(mm)
 #define pgd_free(mm, pgd)		free_pgd_slow(mm, pgd)
 
-#define PGALLOC_GFP	(GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+#define PGALLOC_GFP	(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
 
 /*
  * Allocate one PTE table.
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 0a7b885964ba..d9a94da0c29f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2439,6 +2439,15 @@ config PCI_CNB20LE_QUIRK
 
 source "drivers/pci/Kconfig"
 
+config ISA_BUS
+	bool "ISA-style bus support on modern systems" if EXPERT
+	select ISA_BUS_API
+	help
+	  Enables ISA-style drivers on modern systems. This is necessary to
+	  support PC/104 devices on X86_64 platforms.
+
+	  If unsure, say N.
+
 # x86_64 have no ISA slots, but can have ISA-style DMA.
 config ISA_DMA_API
 	bool "ISA-style DMA support" if (X86_64 && EXPERT)
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 700a9c6e6159..be8e688fa0d4 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -162,6 +162,9 @@ isoimage: $(obj)/bzImage
 	for i in lib lib64 share end ; do \
 		if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \
 			cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \
+			if [ -f /usr/$$i/syslinux/ldlinux.c32 ]; then \
+				cp /usr/$$i/syslinux/ldlinux.c32 $(obj)/isoimage ; \
+			fi ; \
 			break ; \
 		fi ; \
 		if [ $$i = end ] ; then exit 1 ; fi ; \
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index 99c4bab123cd..e30eef4f29a6 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -714,7 +714,7 @@ static void cleanup_rapl_pmus(void)
 	int i;
 
 	for (i = 0; i < rapl_pmus->maxpkg; i++)
-		kfree(rapl_pmus->pmus + i);
+		kfree(rapl_pmus->pmus[i]);
 	kfree(rapl_pmus);
 }
 
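The rapl cleanup fix is a small but classic bug class: the pmus member is an array of pointers, so `kfree(rapl_pmus->pmus + i)` passes the address of slot i (inside the outer allocation) rather than the object that slot points to. A stand-alone user-space analogue with free(); all names here are illustrative:

    #include <stdlib.h>

    struct pmu { int id; };

    struct pmus {
            int n;
            struct pmu *pmus[];  /* pointers to separately allocated pmus */
    };

    static void cleanup(struct pmus *p)
    {
            for (int i = 0; i < p->n; i++)
                    free(p->pmus[i]);  /* was: free(p->pmus + i) -- the
                                        * slot's address, not what it holds */
            free(p);
    }
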
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index b2625867ebd1..874e8bd64d1d 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -2868,27 +2868,10 @@ static struct intel_uncore_type bdx_uncore_cbox = {
 	.format_group		= &hswep_uncore_cbox_format_group,
 };
 
-static struct intel_uncore_type bdx_uncore_sbox = {
-	.name			= "sbox",
-	.num_counters		= 4,
-	.num_boxes		= 4,
-	.perf_ctr_bits		= 48,
-	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
-	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
-	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
-	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
-	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
-	.ops			= &hswep_uncore_sbox_msr_ops,
-	.format_group		= &hswep_uncore_sbox_format_group,
-};
-
-#define BDX_MSR_UNCORE_SBOX	3
-
 static struct intel_uncore_type *bdx_msr_uncores[] = {
 	&bdx_uncore_ubox,
 	&bdx_uncore_cbox,
 	&hswep_uncore_pcu,
-	&bdx_uncore_sbox,
 	NULL,
 };
 
@@ -2897,10 +2880,6 @@ void bdx_uncore_cpu_init(void)
 	if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
 		bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
 	uncore_msr_uncores = bdx_msr_uncores;
-
-	/* BDX-DE doesn't have SBOX */
-	if (boot_cpu_data.x86_model == 86)
-		uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
 }
 
 static struct intel_uncore_type bdx_uncore_ha = {
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
new file mode 100644
index 000000000000..6999f7d01a0d
--- /dev/null
+++ b/arch/x86/include/asm/intel-family.h
@@ -0,0 +1,68 @@
+#ifndef _ASM_X86_INTEL_FAMILY_H
+#define _ASM_X86_INTEL_FAMILY_H
+
+/*
+ * "Big Core" Processors (Branded as Core, Xeon, etc...)
+ *
+ * The "_X" parts are generally the EP and EX Xeons, or the
+ * "Extreme" ones, like Broadwell-E.
+ *
+ * Things ending in "2" are usually because we have no better
+ * name for them. There's no processor called "WESTMERE2".
+ */
+
+#define INTEL_FAM6_CORE_YONAH		0x0E
+#define INTEL_FAM6_CORE2_MEROM		0x0F
+#define INTEL_FAM6_CORE2_MEROM_L	0x16
+#define INTEL_FAM6_CORE2_PENRYN		0x17
+#define INTEL_FAM6_CORE2_DUNNINGTON	0x1D
+
+#define INTEL_FAM6_NEHALEM		0x1E
+#define INTEL_FAM6_NEHALEM_EP		0x1A
+#define INTEL_FAM6_NEHALEM_EX		0x2E
+#define INTEL_FAM6_WESTMERE		0x25
+#define INTEL_FAM6_WESTMERE2		0x1F
+#define INTEL_FAM6_WESTMERE_EP		0x2C
+#define INTEL_FAM6_WESTMERE_EX		0x2F
+
+#define INTEL_FAM6_SANDYBRIDGE		0x2A
+#define INTEL_FAM6_SANDYBRIDGE_X	0x2D
+#define INTEL_FAM6_IVYBRIDGE		0x3A
+#define INTEL_FAM6_IVYBRIDGE_X		0x3E
+
+#define INTEL_FAM6_HASWELL_CORE		0x3C
+#define INTEL_FAM6_HASWELL_X		0x3F
+#define INTEL_FAM6_HASWELL_ULT		0x45
+#define INTEL_FAM6_HASWELL_GT3E		0x46
+
+#define INTEL_FAM6_BROADWELL_CORE	0x3D
+#define INTEL_FAM6_BROADWELL_XEON_D	0x56
+#define INTEL_FAM6_BROADWELL_GT3E	0x47
+#define INTEL_FAM6_BROADWELL_X		0x4F
+
+#define INTEL_FAM6_SKYLAKE_MOBILE	0x4E
+#define INTEL_FAM6_SKYLAKE_DESKTOP	0x5E
+#define INTEL_FAM6_SKYLAKE_X		0x55
+#define INTEL_FAM6_KABYLAKE_MOBILE	0x8E
+#define INTEL_FAM6_KABYLAKE_DESKTOP	0x9E
+
+/* "Small Core" Processors (Atom) */
+
+#define INTEL_FAM6_ATOM_PINEVIEW	0x1C
+#define INTEL_FAM6_ATOM_LINCROFT	0x26
+#define INTEL_FAM6_ATOM_PENWELL		0x27
+#define INTEL_FAM6_ATOM_CLOVERVIEW	0x35
+#define INTEL_FAM6_ATOM_CEDARVIEW	0x36
+#define INTEL_FAM6_ATOM_SILVERMONT1	0x37 /* BayTrail/BYT / Valleyview */
+#define INTEL_FAM6_ATOM_SILVERMONT2	0x4D /* Avaton/Rangely */
+#define INTEL_FAM6_ATOM_AIRMONT		0x4C /* CherryTrail / Braswell */
+#define INTEL_FAM6_ATOM_MERRIFIELD1	0x4A /* Tangier */
+#define INTEL_FAM6_ATOM_MERRIFIELD2	0x5A /* Annidale */
+#define INTEL_FAM6_ATOM_GOLDMONT	0x5C
+#define INTEL_FAM6_ATOM_DENVERTON	0x5F /* Goldmont Microserver */
+
+/* Xeon Phi */
+
+#define INTEL_FAM6_XEON_PHI_KNL	0x57 /* Knights Landing */
+
+#endif /* _ASM_X86_INTEL_FAMILY_H */
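A hypothetical kernel-style fragment showing the kind of use this header is meant for; the cpuinfo_x86 fields are the usual ones, but the helper itself is purely illustrative and not part of this patch set:

    #include <asm/intel-family.h>

    /* Model checks become self-documenting, e.g. instead of a bare 0x56: */
    static bool cpu_is_broadwell_xeon_d(struct cpuinfo_x86 *c)
    {
            return c->x86_vendor == X86_VENDOR_INTEL &&
                   c->x86 == 6 &&   /* all INTEL_FAM6_* IDs are family 6 */
                   c->x86_model == INTEL_FAM6_BROADWELL_XEON_D;
    }
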
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index 4421b5da409d..d1d1e5094c28 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -38,12 +38,11 @@ typedef u8 kprobe_opcode_t;
 #define RELATIVECALL_OPCODE 0xe8
 #define RELATIVE_ADDR_SIZE 4
 #define MAX_STACK_SIZE 64
-#define MIN_STACK_SIZE(ADDR)					       \
-	(((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
-			      THREAD_SIZE - (unsigned long)(ADDR)))    \
-	 ? (MAX_STACK_SIZE)					       \
-	 : (((unsigned long)current_thread_info()) +		       \
-	    THREAD_SIZE - (unsigned long)(ADDR)))
+#define CUR_STACK_SIZE(ADDR) \
+	(current_top_of_stack() - (unsigned long)(ADDR))
+#define MIN_STACK_SIZE(ADDR) \
+	(MAX_STACK_SIZE < CUR_STACK_SIZE(ADDR) ? \
+	 MAX_STACK_SIZE : CUR_STACK_SIZE(ADDR))
 
 #define flush_insn_slot(p) do { } while (0)
 
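The rewritten macros compute min(MAX_STACK_SIZE, distance from ADDR to the top of the current stack), using current_top_of_stack() instead of deriving the top from the thread_info pointer. As a plain C function (a sketch; top_of_stack stands in for current_top_of_stack()):

    #include <stddef.h>

    #define MAX_STACK_SIZE 64

    /* min(MAX_STACK_SIZE, bytes between addr and the stack top) */
    static size_t min_stack_size(unsigned long top_of_stack, unsigned long addr)
    {
            size_t cur = top_of_stack - addr;   /* CUR_STACK_SIZE(ADDR) */

            return cur < MAX_STACK_SIZE ? cur : MAX_STACK_SIZE;
    }
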
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index e0fbe7e70dc1..69e62862b622 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -27,6 +27,7 @@
 #include <linux/irqbypass.h>
 #include <linux/hyperv.h>
 
+#include <asm/apic.h>
 #include <asm/pvclock-abi.h>
 #include <asm/desc.h>
 #include <asm/mtrr.h>
@@ -1368,4 +1369,14 @@ static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 
 static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
 
+static inline int kvm_cpu_get_apicid(int mps_cpu)
+{
+#ifdef CONFIG_X86_LOCAL_APIC
+	return __default_cpu_present_to_apicid(mps_cpu);
+#else
+	WARN_ON_ONCE(1);
+	return BAD_APICID;
+#endif
+}
+
 #endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 7dc1d8fef7fd..b5fee97813cd 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -122,7 +122,7 @@ notrace static inline void native_write_msr(unsigned int msr,
 		     "2:\n"
 		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe)
 		     : : "c" (msr), "a"(low), "d" (high) : "memory");
-	if (msr_tracepoint_active(__tracepoint_read_msr))
+	if (msr_tracepoint_active(__tracepoint_write_msr))
 		do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
 }
 
@@ -141,7 +141,7 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
 		     : "c" (msr), "0" (low), "d" (high),
 		     [fault] "i" (-EIO)
 		     : "memory");
-	if (msr_tracepoint_active(__tracepoint_read_msr))
+	if (msr_tracepoint_active(__tracepoint_write_msr))
 		do_trace_write_msr(msr, ((u64)high << 32 | low), err);
 	return err;
 }
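Both msr.h hunks fix the same copy-paste: the write path was gated on the read tracepoint, so whether write events were emitted depended on whether *reads* were being traced. A stand-alone sketch of the bug class (the guard must test the same event the body emits; names here are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    static bool read_trace_on;
    static bool write_trace_on;

    static void trace_write_msr(unsigned int msr, unsigned long long val)
    {
            /* buggy form gated this on read_trace_on (the wrong event) */
            if (write_trace_on)
                    printf("write_msr: %#x <- %#llx\n", msr, val);
    }

    int main(void)
    {
            write_trace_on = true;
            trace_write_msr(0x1a0, 0x1ULL);
            return 0;
    }
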
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index bf7f8b55b0f9..574c23cf761a 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -81,7 +81,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
 	struct page *page;
-	page = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0);
+	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
 	if (!page)
 		return NULL;
 	if (!pgtable_pmd_page_ctor(page)) {
@@ -125,7 +125,7 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+	return (pud_t *)get_zeroed_page(GFP_KERNEL);
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
index 7c247e7404be..0944218af9e2 100644
--- a/arch/x86/include/asm/stacktrace.h
+++ b/arch/x86/include/asm/stacktrace.h
@@ -14,7 +14,7 @@ extern int kstack_depth_to_print;
 struct thread_info;
 struct stacktrace_ops;
 
-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
+typedef unsigned long (*walk_stack_t)(struct task_struct *task,
 				      unsigned long *stack,
 				      unsigned long bp,
 				      const struct stacktrace_ops *ops,
@@ -23,13 +23,13 @@ typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
 				      int *graph);
 
 extern unsigned long
-print_context_stack(struct thread_info *tinfo,
+print_context_stack(struct task_struct *task,
 		    unsigned long *stack, unsigned long bp,
 		    const struct stacktrace_ops *ops, void *data,
 		    unsigned long *end, int *graph);
 
 extern unsigned long
-print_context_stack_bp(struct thread_info *tinfo,
+print_context_stack_bp(struct task_struct *task,
 		       unsigned long *stack, unsigned long bp,
 		       const struct stacktrace_ops *ops, void *data,
 		       unsigned long *end, int *graph);
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 84e33ff5a6d5..446702ed99dc 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2588,8 +2588,8 @@ static struct resource * __init ioapic_setup_resources(void)
 		res[num].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
 		snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
 		mem += IOAPIC_RESOURCE_NAME_SIZE;
+		ioapics[i].iomem_res = &res[num];
 		num++;
-		ioapics[i].iomem_res = res;
 	}
 
 	ioapic_resources = res;
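Before this fix every entry stored the array base `res`, so all I/O APICs aliased resource 0; each entry must instead record the address of its own slot, `&res[num]`. A stand-alone user-space demonstration of the difference (types and names are illustrative):

    #include <stdio.h>

    struct resource { const char *name; };
    struct ioapic  { struct resource *iomem_res; };

    int main(void)
    {
            static struct resource res[2] = { { "IOAPIC 0" }, { "IOAPIC 1" } };
            struct ioapic ioapics[2];
            int num = 0;

            for (int i = 0; i < 2; i++) {
                    ioapics[i].iomem_res = &res[num]; /* was: = res (slot 0 for all) */
                    num++;
            }
            printf("%s\n", ioapics[1].iomem_res->name); /* prints "IOAPIC 1" */
            return 0;
    }
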
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index c343a54bed39..f5c69d8974e1 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -674,14 +674,14 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
 	u64 value;
 
 	/* re-enable TopologyExtensions if switched off by BIOS */
-	if ((c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
+	if ((c->x86_model >= 0x10) && (c->x86_model <= 0x6f) &&
 	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {
 
 		if (msr_set_bit(0xc0011005, 54) > 0) {
 			rdmsrl(0xc0011005, value);
 			if (value & BIT_64(54)) {
 				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
-				pr_info(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
+				pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
 			}
 		}
 	}
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 2bb25c3fe2e8..ef8017ca5ba9 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -42,16 +42,14 @@ void printk_address(unsigned long address)
 static void
 print_ftrace_graph_addr(unsigned long addr, void *data,
 			const struct stacktrace_ops *ops,
-			struct thread_info *tinfo, int *graph)
+			struct task_struct *task, int *graph)
 {
-	struct task_struct *task;
 	unsigned long ret_addr;
 	int index;
 
 	if (addr != (unsigned long)return_to_handler)
 		return;
 
-	task = tinfo->task;
 	index = task->curr_ret_stack;
 
 	if (!task->ret_stack || index < *graph)
@@ -68,7 +66,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
 static inline void
 print_ftrace_graph_addr(unsigned long addr, void *data,
 			const struct stacktrace_ops *ops,
-			struct thread_info *tinfo, int *graph)
+			struct task_struct *task, int *graph)
 { }
 #endif
 
@@ -79,10 +77,10 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
  * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
  */
 
-static inline int valid_stack_ptr(struct thread_info *tinfo,
+static inline int valid_stack_ptr(struct task_struct *task,
 				  void *p, unsigned int size, void *end)
 {
-	void *t = tinfo;
+	void *t = task_stack_page(task);
 	if (end) {
 		if (p < end && p >= (end-THREAD_SIZE))
 			return 1;
@@ -93,14 +91,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
 }
 
 unsigned long
-print_context_stack(struct thread_info *tinfo,
+print_context_stack(struct task_struct *task,
 		    unsigned long *stack, unsigned long bp,
 		    const struct stacktrace_ops *ops, void *data,
 		    unsigned long *end, int *graph)
 {
 	struct stack_frame *frame = (struct stack_frame *)bp;
 
-	while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
+	while (valid_stack_ptr(task, stack, sizeof(*stack), end)) {
 		unsigned long addr;
 
 		addr = *stack;
@@ -112,7 +110,7 @@ print_context_stack(struct thread_info *tinfo,
 		} else {
 			ops->address(data, addr, 0);
 		}
-		print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
+		print_ftrace_graph_addr(addr, data, ops, task, graph);
 		}
 		stack++;
 	}
@@ -121,7 +119,7 @@ print_context_stack(struct thread_info *tinfo,
 EXPORT_SYMBOL_GPL(print_context_stack);
 
 unsigned long
-print_context_stack_bp(struct thread_info *tinfo,
+print_context_stack_bp(struct task_struct *task,
 		       unsigned long *stack, unsigned long bp,
 		       const struct stacktrace_ops *ops, void *data,
 		       unsigned long *end, int *graph)
@@ -129,7 +127,7 @@ print_context_stack_bp(struct thread_info *tinfo,
 	struct stack_frame *frame = (struct stack_frame *)bp;
 	unsigned long *ret_addr = &frame->return_address;
 
-	while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
+	while (valid_stack_ptr(task, ret_addr, sizeof(*ret_addr), end)) {
 		unsigned long addr = *ret_addr;
 
 		if (!__kernel_text_address(addr))
@@ -139,7 +137,7 @@ print_context_stack_bp(struct thread_info *tinfo,
 			break;
 		frame = frame->next_frame;
 		ret_addr = &frame->return_address;
-		print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
+		print_ftrace_graph_addr(addr, data, ops, task, graph);
 	}
 
 	return (unsigned long)frame;
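After this conversion the stack base is derived from the task via task_stack_page() rather than reused from the thread_info pointer itself; the bounds test is otherwise unchanged. A stand-alone rendering of that check, with THREAD_SIZE as an illustrative constant and the range test slightly tightened for clarity:

    #include <stdbool.h>
    #include <stdint.h>

    #define THREAD_SIZE 16384UL   /* illustrative; arch-dependent in reality */

    /* p..p+size must sit inside [end - THREAD_SIZE, end) when walking an
     * exception/irq stack (end != 0), else inside the task's own stack. */
    static bool valid_stack_ptr(uintptr_t stack_base, uintptr_t p,
                                unsigned int size, uintptr_t end)
    {
            if (end)
                    return p >= end - THREAD_SIZE && p + size <= end;

            return p >= stack_base && p + size <= stack_base + THREAD_SIZE;
    }
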
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index 464ffd69b92e..fef917e79b9d 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -61,15 +61,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 	bp = stack_frame(task, regs);
 
 	for (;;) {
-		struct thread_info *context;
 		void *end_stack;
 
 		end_stack = is_hardirq_stack(stack, cpu);
 		if (!end_stack)
 			end_stack = is_softirq_stack(stack, cpu);
 
-		context = task_thread_info(task);
-		bp = ops->walk_stack(context, stack, bp, ops, data,
+		bp = ops->walk_stack(task, stack, bp, ops, data,
 				     end_stack, &graph);
 
 		/* Stop if not on irq stack */
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 5f1c6266eb30..d558a8a49016 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -153,7 +153,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 		const struct stacktrace_ops *ops, void *data)
 {
 	const unsigned cpu = get_cpu();
-	struct thread_info *tinfo;
 	unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
 	unsigned long dummy;
 	unsigned used = 0;
@@ -179,7 +178,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 	 * current stack address. If the stacks consist of nested
 	 * exceptions
 	 */
-	tinfo = task_thread_info(task);
 	while (!done) {
 		unsigned long *stack_end;
 		enum stack_type stype;
@@ -202,7 +200,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 			if (ops->stack(data, id) < 0)
 				break;
 
-			bp = ops->walk_stack(tinfo, stack, bp, ops,
+			bp = ops->walk_stack(task, stack, bp, ops,
 					     data, stack_end, &graph);
 			ops->stack(data, "<EOE>");
 			/*
@@ -218,7 +216,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 
 			if (ops->stack(data, "IRQ") < 0)
 				break;
-			bp = ops->walk_stack(tinfo, stack, bp,
+			bp = ops->walk_stack(task, stack, bp,
 				ops, data, stack_end, &graph);
 			/*
 			 * We link to the next stack (which would be
@@ -240,7 +238,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 	/*
 	 * This handles the process stack:
 	 */
-	bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
+	bp = ops->walk_stack(task, stack, bp, ops, data, NULL, &graph);
 	put_cpu();
 }
 EXPORT_SYMBOL(dump_trace);
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index bca14c899137..757390eb562b 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -223,36 +223,19 @@ static void __init intel_remapping_check(int num, int slot, int func)
  * despite the efforts of the "RAM buffer" approach, which simply rounds
  * memory boundaries up to 64M to try to catch space that may decode
  * as RAM and so is not suitable for MMIO.
- *
- * And yes, so far on current devices the base addr is always under 4G.
  */
-static u32 __init intel_stolen_base(int num, int slot, int func, size_t stolen_size)
-{
-	u32 base;
-
-	/*
-	 * For the PCI IDs in this quirk, the stolen base is always
-	 * in 0x5c, aka the BDSM register (yes that's really what
-	 * it's called).
-	 */
-	base = read_pci_config(num, slot, func, 0x5c);
-	base &= ~((1<<20) - 1);
-
-	return base;
-}
 
 #define KB(x)	((x) * 1024UL)
 #define MB(x)	(KB (KB (x)))
-#define GB(x)	(MB (KB (x)))
 
 static size_t __init i830_tseg_size(void)
 {
-	u8 tmp = read_pci_config_byte(0, 0, 0, I830_ESMRAMC);
+	u8 esmramc = read_pci_config_byte(0, 0, 0, I830_ESMRAMC);
 
-	if (!(tmp & TSEG_ENABLE))
+	if (!(esmramc & TSEG_ENABLE))
 		return 0;
 
-	if (tmp & I830_TSEG_SIZE_1M)
+	if (esmramc & I830_TSEG_SIZE_1M)
 		return MB(1);
 	else
 		return KB(512);
@@ -260,27 +243,26 @@ static size_t __init i830_tseg_size(void)
 
 static size_t __init i845_tseg_size(void)
 {
-	u8 tmp = read_pci_config_byte(0, 0, 0, I845_ESMRAMC);
+	u8 esmramc = read_pci_config_byte(0, 0, 0, I845_ESMRAMC);
+	u8 tseg_size = esmramc & I845_TSEG_SIZE_MASK;
 
-	if (!(tmp & TSEG_ENABLE))
+	if (!(esmramc & TSEG_ENABLE))
 		return 0;
 
-	switch (tmp & I845_TSEG_SIZE_MASK) {
-	case I845_TSEG_SIZE_512K:
-		return KB(512);
-	case I845_TSEG_SIZE_1M:
-		return MB(1);
+	switch (tseg_size) {
+	case I845_TSEG_SIZE_512K:	return KB(512);
+	case I845_TSEG_SIZE_1M:		return MB(1);
 	default:
-		WARN_ON(1);
-		return 0;
+		WARN(1, "Unknown ESMRAMC value: %x!\n", esmramc);
 	}
+	return 0;
 }
 
 static size_t __init i85x_tseg_size(void)
 {
-	u8 tmp = read_pci_config_byte(0, 0, 0, I85X_ESMRAMC);
+	u8 esmramc = read_pci_config_byte(0, 0, 0, I85X_ESMRAMC);
 
-	if (!(tmp & TSEG_ENABLE))
+	if (!(esmramc & TSEG_ENABLE))
 		return 0;
 
 	return MB(1);
@@ -300,285 +282,287 @@ static size_t __init i85x_mem_size(void)
  * On 830/845/85x the stolen memory base isn't available in any
  * register. We need to calculate it as TOM-TSEG_SIZE-stolen_size.
  */
-static u32 __init i830_stolen_base(int num, int slot, int func, size_t stolen_size)
+static phys_addr_t __init i830_stolen_base(int num, int slot, int func,
+					   size_t stolen_size)
 {
-	return i830_mem_size() - i830_tseg_size() - stolen_size;
+	return (phys_addr_t)i830_mem_size() - i830_tseg_size() - stolen_size;
 }
 
-static u32 __init i845_stolen_base(int num, int slot, int func, size_t stolen_size)
+static phys_addr_t __init i845_stolen_base(int num, int slot, int func,
+					   size_t stolen_size)
 {
-	return i830_mem_size() - i845_tseg_size() - stolen_size;
+	return (phys_addr_t)i830_mem_size() - i845_tseg_size() - stolen_size;
 }
 
-static u32 __init i85x_stolen_base(int num, int slot, int func, size_t stolen_size)
+static phys_addr_t __init i85x_stolen_base(int num, int slot, int func,
+					   size_t stolen_size)
 {
-	return i85x_mem_size() - i85x_tseg_size() - stolen_size;
+	return (phys_addr_t)i85x_mem_size() - i85x_tseg_size() - stolen_size;
 }
 
-static u32 __init i865_stolen_base(int num, int slot, int func, size_t stolen_size)
+static phys_addr_t __init i865_stolen_base(int num, int slot, int func,
+					   size_t stolen_size)
 {
+	u16 toud;
+
 	/*
 	 * FIXME is the graphics stolen memory region
 	 * always at TOUD? Ie. is it always the last
 	 * one to be allocated by the BIOS?
 	 */
-	return read_pci_config_16(0, 0, 0, I865_TOUD) << 16;
+	toud = read_pci_config_16(0, 0, 0, I865_TOUD);
+
+	return (phys_addr_t)toud << 16;
+}
+
+static phys_addr_t __init gen3_stolen_base(int num, int slot, int func,
+					   size_t stolen_size)
+{
+	u32 bsm;
+
+	/* Almost universally we can find the Graphics Base of Stolen Memory
+	 * at register BSM (0x5c) in the igfx configuration space. On a few
+	 * (desktop) machines this is also mirrored in the bridge device at
+	 * different locations, or in the MCHBAR.
+	 */
+	bsm = read_pci_config(num, slot, func, INTEL_BSM);
+
+	return (phys_addr_t)bsm & INTEL_BSM_MASK;
 }
 
 static size_t __init i830_stolen_size(int num, int slot, int func)
 {
-	size_t stolen_size;
 	u16 gmch_ctrl;
+	u16 gms;
 
 	gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL);
-
-	switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
-	case I830_GMCH_GMS_STOLEN_512:
-		stolen_size = KB(512);
-		break;
-	case I830_GMCH_GMS_STOLEN_1024:
-		stolen_size = MB(1);
-		break;
-	case I830_GMCH_GMS_STOLEN_8192:
-		stolen_size = MB(8);
-		break;
-	case I830_GMCH_GMS_LOCAL:
-		/* local memory isn't part of the normal address space */
-		stolen_size = 0;
-		break;
+	gms = gmch_ctrl & I830_GMCH_GMS_MASK;
+
+	switch (gms) {
+	case I830_GMCH_GMS_STOLEN_512:	return KB(512);
+	case I830_GMCH_GMS_STOLEN_1024:	return MB(1);
+	case I830_GMCH_GMS_STOLEN_8192:	return MB(8);
+	/* local memory isn't part of the normal address space */
+	case I830_GMCH_GMS_LOCAL:	return 0;
 	default:
-		return 0;
+		WARN(1, "Unknown GMCH_CTRL value: %x!\n", gmch_ctrl);
 	}
 
-	return stolen_size;
+	return 0;
 }
 
 static size_t __init gen3_stolen_size(int num, int slot, int func)
 {
-	size_t stolen_size;
 	u16 gmch_ctrl;
+	u16 gms;
 
 	gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL);
-
-	switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
-	case I855_GMCH_GMS_STOLEN_1M:
-		stolen_size = MB(1);
-		break;
-	case I855_GMCH_GMS_STOLEN_4M:
-		stolen_size = MB(4);
-		break;
-	case I855_GMCH_GMS_STOLEN_8M:
-		stolen_size = MB(8);
-		break;
-	case I855_GMCH_GMS_STOLEN_16M:
-		stolen_size = MB(16);
-		break;
-	case I855_GMCH_GMS_STOLEN_32M:
-		stolen_size = MB(32);
-		break;
-	case I915_GMCH_GMS_STOLEN_48M:
-		stolen_size = MB(48);
-		break;
-	case I915_GMCH_GMS_STOLEN_64M:
-		stolen_size = MB(64);
-		break;
-	case G33_GMCH_GMS_STOLEN_128M:
-		stolen_size = MB(128);
-		break;
-	case G33_GMCH_GMS_STOLEN_256M:
-		stolen_size = MB(256);
-		break;
-	case INTEL_GMCH_GMS_STOLEN_96M:
-		stolen_size = MB(96);
-		break;
-	case INTEL_GMCH_GMS_STOLEN_160M:
-		stolen_size = MB(160);
-		break;
-	case INTEL_GMCH_GMS_STOLEN_224M:
-		stolen_size = MB(224);
-		break;
-	case INTEL_GMCH_GMS_STOLEN_352M:
-		stolen_size = MB(352);
-		break;
+	gms = gmch_ctrl & I855_GMCH_GMS_MASK;
+
+	switch (gms) {
+	case I855_GMCH_GMS_STOLEN_1M:	return MB(1);
+	case I855_GMCH_GMS_STOLEN_4M:	return MB(4);
+	case I855_GMCH_GMS_STOLEN_8M:	return MB(8);
+	case I855_GMCH_GMS_STOLEN_16M:	return MB(16);
+	case I855_GMCH_GMS_STOLEN_32M:	return MB(32);
+	case I915_GMCH_GMS_STOLEN_48M:	return MB(48);
+	case I915_GMCH_GMS_STOLEN_64M:	return MB(64);
+	case G33_GMCH_GMS_STOLEN_128M:	return MB(128);
+	case G33_GMCH_GMS_STOLEN_256M:	return MB(256);
+	case INTEL_GMCH_GMS_STOLEN_96M:	return MB(96);
+	case INTEL_GMCH_GMS_STOLEN_160M:return MB(160);
+	case INTEL_GMCH_GMS_STOLEN_224M:return MB(224);
+	case INTEL_GMCH_GMS_STOLEN_352M:return MB(352);
 	default:
-		stolen_size = 0;
-		break;
+		WARN(1, "Unknown GMCH_CTRL value: %x!\n", gmch_ctrl);
 	}
 
-	return stolen_size;
+	return 0;
 }
 
 static size_t __init gen6_stolen_size(int num, int slot, int func)
 {
 	u16 gmch_ctrl;
+	u16 gms;
 
 	gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
-	gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
-	gmch_ctrl &= SNB_GMCH_GMS_MASK;
+	gms = (gmch_ctrl >> SNB_GMCH_GMS_SHIFT) & SNB_GMCH_GMS_MASK;
 
-	return gmch_ctrl << 25; /* 32 MB units */
+	return (size_t)gms * MB(32);
 }
 
 static size_t __init gen8_stolen_size(int num, int slot, int func)
 {
 	u16 gmch_ctrl;
+	u16 gms;
 
 	gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
-	gmch_ctrl >>= BDW_GMCH_GMS_SHIFT;
-	gmch_ctrl &= BDW_GMCH_GMS_MASK;
-	return gmch_ctrl << 25; /* 32 MB units */
+	gms = (gmch_ctrl >> BDW_GMCH_GMS_SHIFT) & BDW_GMCH_GMS_MASK;
+
+	return (size_t)gms * MB(32);
 }
 
 static size_t __init chv_stolen_size(int num, int slot, int func)
 {
 	u16 gmch_ctrl;
+	u16 gms;
 
 	gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
-	gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
-	gmch_ctrl &= SNB_GMCH_GMS_MASK;
+	gms = (gmch_ctrl >> SNB_GMCH_GMS_SHIFT) & SNB_GMCH_GMS_MASK;
 
 	/*
 	 * 0x0  to 0x10: 32MB increments starting at 0MB
 	 * 0x11 to 0x16: 4MB increments starting at 8MB
 	 * 0x17 to 0x1d: 4MB increments start at 36MB
 	 */
-	if (gmch_ctrl < 0x11)
-		return gmch_ctrl << 25;
-	else if (gmch_ctrl < 0x17)
-		return (gmch_ctrl - 0x11 + 2) << 22;
+	if (gms < 0x11)
+		return (size_t)gms * MB(32);
+	else if (gms < 0x17)
+		return (size_t)(gms - 0x11 + 2) * MB(4);
 	else
-		return (gmch_ctrl - 0x17 + 9) << 22;
+		return (size_t)(gms - 0x17 + 9) * MB(4);
 }
 
-struct intel_stolen_funcs {
-	size_t (*size)(int num, int slot, int func);
-	u32 (*base)(int num, int slot, int func, size_t size);
-};
-
 static size_t __init gen9_stolen_size(int num, int slot, int func)
 {
 	u16 gmch_ctrl;
+	u16 gms;
 
 	gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
-	gmch_ctrl >>= BDW_GMCH_GMS_SHIFT;
-	gmch_ctrl &= BDW_GMCH_GMS_MASK;
+	gms = (gmch_ctrl >> BDW_GMCH_GMS_SHIFT) & BDW_GMCH_GMS_MASK;
 
-	if (gmch_ctrl < 0xf0)
-		return gmch_ctrl << 25; /* 32 MB units */
+	/* 0x0  to 0xef: 32MB increments starting at 0MB */
+	/* 0xf0 to 0xfe: 4MB increments starting at 4MB */
+	if (gms < 0xf0)
+		return (size_t)gms * MB(32);
 	else
-		/* 4MB increments starting at 0xf0 for 4MB */
-		return (gmch_ctrl - 0xf0 + 1) << 22;
+		return (size_t)(gms - 0xf0 + 1) * MB(4);
 }
 
-typedef size_t (*stolen_size_fn)(int num, int slot, int func);
+struct intel_early_ops {
+	size_t (*stolen_size)(int num, int slot, int func);
+	phys_addr_t (*stolen_base)(int num, int slot, int func, size_t size);
+};
 
-static const struct intel_stolen_funcs i830_stolen_funcs __initconst = {
-	.base = i830_stolen_base,
-	.size = i830_stolen_size,
+static const struct intel_early_ops i830_early_ops __initconst = {
+	.stolen_base = i830_stolen_base,
+	.stolen_size = i830_stolen_size,
 };
 
-static const struct intel_stolen_funcs i845_stolen_funcs __initconst = {
-	.base = i845_stolen_base,
-	.size = i830_stolen_size,
+static const struct intel_early_ops i845_early_ops __initconst = {
+	.stolen_base = i845_stolen_base,
+	.stolen_size = i830_stolen_size,
 };
 
-static const struct intel_stolen_funcs i85x_stolen_funcs __initconst = {
-	.base = i85x_stolen_base,
-	.size = gen3_stolen_size,
+static const struct intel_early_ops i85x_early_ops __initconst = {
+	.stolen_base = i85x_stolen_base,
+	.stolen_size = gen3_stolen_size,
 };
 
-static const struct intel_stolen_funcs i865_stolen_funcs __initconst = {
-	.base = i865_stolen_base,
-	.size = gen3_stolen_size,
+static const struct intel_early_ops i865_early_ops __initconst = {
+	.stolen_base = i865_stolen_base,
+	.stolen_size = gen3_stolen_size,
 };
 
-static const struct intel_stolen_funcs gen3_stolen_funcs __initconst = {
-	.base = intel_stolen_base,
-	.size = gen3_stolen_size,
+static const struct intel_early_ops gen3_early_ops __initconst = {
+	.stolen_base = gen3_stolen_base,
+	.stolen_size = gen3_stolen_size,
 };
 
-static const struct intel_stolen_funcs gen6_stolen_funcs __initconst = {
-	.base = intel_stolen_base,
-	.size = gen6_stolen_size,
+static const struct intel_early_ops gen6_early_ops __initconst = {
+	.stolen_base = gen3_stolen_base,
+	.stolen_size = gen6_stolen_size,
 };
 
-static const struct intel_stolen_funcs gen8_stolen_funcs __initconst = {
-	.base = intel_stolen_base,
-	.size = gen8_stolen_size,
+static const struct intel_early_ops gen8_early_ops __initconst = {
+	.stolen_base = gen3_stolen_base,
+	.stolen_size = gen8_stolen_size,
 };
 
-static const struct intel_stolen_funcs gen9_stolen_funcs __initconst = {
-	.base = intel_stolen_base,
-	.size = gen9_stolen_size,
+static const struct intel_early_ops gen9_early_ops __initconst = {
+	.stolen_base = gen3_stolen_base,
+	.stolen_size = gen9_stolen_size,
 };
 
-static const struct intel_stolen_funcs chv_stolen_funcs __initconst = {
-	.base = intel_stolen_base,
-	.size = chv_stolen_size,
+static const struct intel_early_ops chv_early_ops __initconst = {
+	.stolen_base = gen3_stolen_base,
+	.stolen_size = chv_stolen_size,
 };
 
-static const struct pci_device_id intel_stolen_ids[] __initconst = {
-	INTEL_I830_IDS(&i830_stolen_funcs),
-	INTEL_I845G_IDS(&i845_stolen_funcs),
-	INTEL_I85X_IDS(&i85x_stolen_funcs),
-	INTEL_I865G_IDS(&i865_stolen_funcs),
-	INTEL_I915G_IDS(&gen3_stolen_funcs),
-	INTEL_I915GM_IDS(&gen3_stolen_funcs),
-	INTEL_I945G_IDS(&gen3_stolen_funcs),
-	INTEL_I945GM_IDS(&gen3_stolen_funcs),
-	INTEL_VLV_M_IDS(&gen6_stolen_funcs),
-	INTEL_VLV_D_IDS(&gen6_stolen_funcs),
-	INTEL_PINEVIEW_IDS(&gen3_stolen_funcs),
-	INTEL_I965G_IDS(&gen3_stolen_funcs),
-	INTEL_G33_IDS(&gen3_stolen_funcs),
-	INTEL_I965GM_IDS(&gen3_stolen_funcs),
-	INTEL_GM45_IDS(&gen3_stolen_funcs),
-	INTEL_G45_IDS(&gen3_stolen_funcs),
-	INTEL_IRONLAKE_D_IDS(&gen3_stolen_funcs),
-	INTEL_IRONLAKE_M_IDS(&gen3_stolen_funcs),
-	INTEL_SNB_D_IDS(&gen6_stolen_funcs),
-	INTEL_SNB_M_IDS(&gen6_stolen_funcs),
-	INTEL_IVB_M_IDS(&gen6_stolen_funcs),
-	INTEL_IVB_D_IDS(&gen6_stolen_funcs),
-	INTEL_HSW_D_IDS(&gen6_stolen_funcs),
-	INTEL_HSW_M_IDS(&gen6_stolen_funcs),
+static const struct pci_device_id intel_early_ids[] __initconst = {
+	INTEL_I830_IDS(&i830_early_ops),
+	INTEL_I845G_IDS(&i845_early_ops),
+	INTEL_I85X_IDS(&i85x_early_ops),
+	INTEL_I865G_IDS(&i865_early_ops),
+	INTEL_I915G_IDS(&gen3_early_ops),
+	INTEL_I915GM_IDS(&gen3_early_ops),
+	INTEL_I945G_IDS(&gen3_early_ops),
+	INTEL_I945GM_IDS(&gen3_early_ops),
+	INTEL_VLV_M_IDS(&gen6_early_ops),
+	INTEL_VLV_D_IDS(&gen6_early_ops),
+	INTEL_PINEVIEW_IDS(&gen3_early_ops),
+	INTEL_I965G_IDS(&gen3_early_ops),
+	INTEL_G33_IDS(&gen3_early_ops),
+	INTEL_I965GM_IDS(&gen3_early_ops),
+	INTEL_GM45_IDS(&gen3_early_ops),
+	INTEL_G45_IDS(&gen3_early_ops),
+	INTEL_IRONLAKE_D_IDS(&gen3_early_ops),
+	INTEL_IRONLAKE_M_IDS(&gen3_early_ops),
+	INTEL_SNB_D_IDS(&gen6_early_ops),
+	INTEL_SNB_M_IDS(&gen6_early_ops),
+	INTEL_IVB_M_IDS(&gen6_early_ops),
+	INTEL_IVB_D_IDS(&gen6_early_ops),
+	INTEL_HSW_D_IDS(&gen6_early_ops),
+	INTEL_HSW_M_IDS(&gen6_early_ops),
545 INTEL_BDW_M_IDS(&gen8_stolen_funcs), 517 INTEL_BDW_M_IDS(&gen8_early_ops),
546 INTEL_BDW_D_IDS(&gen8_stolen_funcs), 518 INTEL_BDW_D_IDS(&gen8_early_ops),
547 INTEL_CHV_IDS(&chv_stolen_funcs), 519 INTEL_CHV_IDS(&chv_early_ops),
548 INTEL_SKL_IDS(&gen9_stolen_funcs), 520 INTEL_SKL_IDS(&gen9_early_ops),
549 INTEL_BXT_IDS(&gen9_stolen_funcs), 521 INTEL_BXT_IDS(&gen9_early_ops),
550 INTEL_KBL_IDS(&gen9_stolen_funcs), 522 INTEL_KBL_IDS(&gen9_early_ops),
551}; 523};
552 524
553static void __init intel_graphics_stolen(int num, int slot, int func) 525static void __init
526intel_graphics_stolen(int num, int slot, int func,
527 const struct intel_early_ops *early_ops)
554{ 528{
529 phys_addr_t base, end;
555 size_t size; 530 size_t size;
531
532 size = early_ops->stolen_size(num, slot, func);
533 base = early_ops->stolen_base(num, slot, func, size);
534
535 if (!size || !base)
536 return;
537
538 end = base + size - 1;
539 printk(KERN_INFO "Reserving Intel graphics memory at %pa-%pa\n",
540 &base, &end);
541
542 /* Mark this space as reserved */
543 e820_add_region(base, size, E820_RESERVED);
544 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
545}
546
547static void __init intel_graphics_quirks(int num, int slot, int func)
548{
549 const struct intel_early_ops *early_ops;
550 u16 device;
556 int i; 551 int i;
557 u32 start;
558 u16 device, subvendor, subdevice;
559 552
560 device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID); 553 device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);
561 subvendor = read_pci_config_16(num, slot, func, 554
562 PCI_SUBSYSTEM_VENDOR_ID); 555 for (i = 0; i < ARRAY_SIZE(intel_early_ids); i++) {
563 subdevice = read_pci_config_16(num, slot, func, PCI_SUBSYSTEM_ID); 556 kernel_ulong_t driver_data = intel_early_ids[i].driver_data;
564 557
565 for (i = 0; i < ARRAY_SIZE(intel_stolen_ids); i++) { 558 if (intel_early_ids[i].device != device)
566 if (intel_stolen_ids[i].device == device) { 559 continue;
567 const struct intel_stolen_funcs *stolen_funcs = 560
568 (const struct intel_stolen_funcs *)intel_stolen_ids[i].driver_data; 561 early_ops = (typeof(early_ops))driver_data;
569 size = stolen_funcs->size(num, slot, func); 562
570 start = stolen_funcs->base(num, slot, func, size); 563 intel_graphics_stolen(num, slot, func, early_ops);
571 if (size && start) { 564
572 printk(KERN_INFO "Reserving Intel graphics stolen memory at 0x%x-0x%x\n", 565 return;
573 start, start + (u32)size - 1);
574 /* Mark this space as reserved */
575 e820_add_region(start, size, E820_RESERVED);
576 sanitize_e820_map(e820.map,
577 ARRAY_SIZE(e820.map),
578 &e820.nr_map);
579 }
580 return;
581 }
582 } 566 }
583} 567}
584 568
@@ -627,7 +611,7 @@ static struct chipset early_qrk[] __initdata = {
         { PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST,
           PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
         { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, PCI_ANY_ID,
-          QFLAG_APPLY_ONCE, intel_graphics_stolen },
+          QFLAG_APPLY_ONCE, intel_graphics_quirks },
         /*
          * HPET on the current version of the Baytrail platform has accuracy
          * problems: it will halt in deep idle state - so we disable it.
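
The stolen-memory hunks above replace open-coded shifts with an explicit three-range decode of the GMS field, computed in size_t. As a quick sanity check, here is a small user-space sketch of the CHV decode rule; the MB() macro and the function name are illustrative stand-ins, not the kernel's own definitions:

    #include <stdio.h>

    #define MB(x) ((size_t)(x) << 20)       /* assumed megabyte helper */

    static size_t chv_decode_gms(unsigned int gms)
    {
            if (gms < 0x11)                 /* 32MB steps from 0MB */
                    return (size_t)gms * MB(32);
            else if (gms < 0x17)            /* 4MB steps from 8MB */
                    return (size_t)(gms - 0x11 + 2) * MB(4);
            else                            /* 4MB steps from 36MB */
                    return (size_t)(gms - 0x17 + 9) * MB(4);
    }

    int main(void)
    {
            /* spot checks: 0x10 -> 512MB, 0x11 -> 8MB, 0x17 -> 36MB */
            printf("%zu %zu %zu\n", chv_decode_gms(0x10) >> 20,
                   chv_decode_gms(0x11) >> 20, chv_decode_gms(0x17) >> 20);
            return 0;
    }

Computing in size_t also matters on gen9, where a gms value near 0xf0 times 32MB no longer fits in 32 bits, something the old gmch_ctrl << 25 expression could not represent.
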
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index 4d38416e2a7f..04f89caef9c4 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -57,7 +57,7 @@
 # error "Need more than one PGD for the ESPFIX hack"
 #endif
 
-#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
 
 /* This contains the *bottom* address of the espfix stack */
 DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 38da8f29a9c8..c627bf8d98ad 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -130,11 +130,9 @@ void irq_ctx_init(int cpu)
 
 void do_softirq_own_stack(void)
 {
-        struct thread_info *curstk;
         struct irq_stack *irqstk;
         u32 *isp, *prev_esp;
 
-        curstk = current_stack();
         irqstk = __this_cpu_read(softirq_stack);
 
         /* build the stack frame on the softirq stack */
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 38cf7a741250..7847e5c0e0b5 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -961,7 +961,19 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
          * normal page fault.
          */
         regs->ip = (unsigned long)cur->addr;
+        /*
+         * Trap flag (TF) has been set here because this fault
+         * happened where the single stepping will be done.
+         * So clear it by resetting the current kprobe:
+         */
+        regs->flags &= ~X86_EFLAGS_TF;
+
+        /*
+         * If the TF flag was set before the kprobe hit,
+         * don't touch it:
+         */
         regs->flags |= kcb->kprobe_old_flags;
+
         if (kcb->kprobe_status == KPROBE_REENTER)
                 restore_previous_kprobe(kcb);
         else
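
The two added steps rebuild EFLAGS around the trap flag: TF is cleared unconditionally (it was set only for single-stepping), then the saved pre-kprobe flags are OR-ed back in, so a TF the task had set before the kprobe hit survives while the single-step TF does not. The same semantics as a standalone sketch, with X86_EFLAGS_TF defined here only for illustration:

    #define X86_EFLAGS_TF 0x100UL   /* trap flag, bit 8 */

    static unsigned long fixup_flags(unsigned long cur, unsigned long old)
    {
            cur &= ~X86_EFLAGS_TF;  /* drop the single-step TF */
            cur |= old;             /* restore saved flags; a pre-existing
                                       TF comes back, otherwise stays clear */
            return cur;
    }
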
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index d1590486204a..00f03d82e69a 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -96,6 +96,12 @@ static inline void cond_local_irq_disable(struct pt_regs *regs)
                 local_irq_disable();
 }
 
+/*
+ * In IST context, we explicitly disable preemption. This serves two
+ * purposes: it makes it much less likely that we would accidentally
+ * schedule in IST context and it will force a warning if we somehow
+ * manage to schedule by accident.
+ */
 void ist_enter(struct pt_regs *regs)
 {
         if (user_mode(regs)) {
@@ -110,13 +116,7 @@ void ist_enter(struct pt_regs *regs)
                 rcu_nmi_enter();
         }
 
-        /*
-         * We are atomic because we're on the IST stack; or we're on
-         * x86_32, in which case we still shouldn't schedule; or we're
-         * on x86_64 and entered from user mode, in which case we're
-         * still atomic unless ist_begin_non_atomic is called.
-         */
-        preempt_count_add(HARDIRQ_OFFSET);
+        preempt_disable();
 
         /* This code is a bit fragile. Test it. */
         RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");
@@ -124,7 +124,7 @@ void ist_enter(struct pt_regs *regs)
 
 void ist_exit(struct pt_regs *regs)
 {
-        preempt_count_sub(HARDIRQ_OFFSET);
+        preempt_enable_no_resched();
 
         if (!user_mode(regs))
                 rcu_nmi_exit();
@@ -155,7 +155,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
         BUG_ON((unsigned long)(current_top_of_stack() -
                current_stack_pointer()) >= THREAD_SIZE);
 
-        preempt_count_sub(HARDIRQ_OFFSET);
+        preempt_enable_no_resched();
 }
 
 /**
@@ -165,7 +165,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
  */
 void ist_end_non_atomic(void)
 {
-        preempt_count_add(HARDIRQ_OFFSET);
+        preempt_disable();
 }
 
 static nokprobe_inline int
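
Both the old and new pairings keep the preemption accounting balanced; what changes is what the count claims about the context. preempt_count_add(HARDIRQ_OFFSET) made IST code look like hardirq context, while preempt_disable() marks it as ordinarily non-preemptible, so, as the new comment says, accidentally scheduling there now trips the scheduler's atomic-context warning. The expected nesting of the four entry points, sketched as a call sequence (illustrative only):

    ist_enter(regs);              /* preempt_disable()           */
    /* ... atomic IST work ... */
    ist_begin_non_atomic(regs);   /* preempt_enable_no_resched() */
    /* ... may schedule here ... */
    ist_end_non_atomic();         /* preempt_disable()           */
    /* ... atomic again ... */
    ist_exit(regs);               /* preempt_enable_no_resched() */
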
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 769af907f824..7597b42a8a88 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -181,19 +181,22 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
                                      struct kvm_cpuid_entry __user *entries)
 {
         int r, i;
-        struct kvm_cpuid_entry *cpuid_entries;
+        struct kvm_cpuid_entry *cpuid_entries = NULL;
 
         r = -E2BIG;
         if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                 goto out;
         r = -ENOMEM;
-        cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
-        if (!cpuid_entries)
-                goto out;
-        r = -EFAULT;
-        if (copy_from_user(cpuid_entries, entries,
-                           cpuid->nent * sizeof(struct kvm_cpuid_entry)))
-                goto out_free;
+        if (cpuid->nent) {
+                cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) *
+                                        cpuid->nent);
+                if (!cpuid_entries)
+                        goto out;
+                r = -EFAULT;
+                if (copy_from_user(cpuid_entries, entries,
+                                   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
+                        goto out;
+        }
         for (i = 0; i < cpuid->nent; i++) {
                 vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
                 vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
@@ -212,9 +215,8 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
         kvm_x86_ops->cpuid_update(vcpu);
         r = kvm_update_cpuid(vcpu);
 
-out_free:
-        vfree(cpuid_entries);
 out:
+        vfree(cpuid_entries);
         return r;
 }
 
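
The rewrite leans on vfree(NULL) being a no-op: cpuid_entries starts as NULL, so the nent == 0 case and every error path can share the single out: label that frees unconditionally, and the separate out_free: label disappears. The same shape in plain C, where free() likewise ignores NULL:

    #include <stdlib.h>

    static int example(size_t n)
    {
            int r = -1;
            char *buf = NULL;

            if (n) {
                    buf = malloc(n);
                    if (!buf)
                            goto out;   /* nothing allocated yet */
            }
            /* ... use buf; it may legitimately be NULL when n == 0 ... */
            r = 0;
    out:
            free(buf);                  /* safe even when buf == NULL */
            return r;
    }
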
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 24e800116ab4..def97b3a392b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -336,12 +336,12 @@ static gfn_t pse36_gfn_delta(u32 gpte)
 #ifdef CONFIG_X86_64
 static void __set_spte(u64 *sptep, u64 spte)
 {
-        *sptep = spte;
+        WRITE_ONCE(*sptep, spte);
 }
 
 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
 {
-        *sptep = spte;
+        WRITE_ONCE(*sptep, spte);
 }
 
 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
@@ -390,7 +390,7 @@ static void __set_spte(u64 *sptep, u64 spte)
          */
         smp_wmb();
 
-        ssptep->spte_low = sspte.spte_low;
+        WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
 }
 
 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
@@ -400,7 +400,7 @@ static void __update_clear_spte_fast(u64 *sptep, u64 spte)
         ssptep = (union split_spte *)sptep;
         sspte = (union split_spte)spte;
 
-        ssptep->spte_low = sspte.spte_low;
+        WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
 
         /*
          * If we map the spte from present to nonpresent, we should clear
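
WRITE_ONCE() keeps the compiler from tearing, fusing, or re-issuing these SPTE stores, which matters because other CPUs can read the same words concurrently without holding a lock. A simplified model of what the macro boils down to for an aligned 64-bit access (the real kernel macro handles all sizes; this is an illustration, not its definition):

    #define WRITE_ONCE_U64(p, v) \
            (*(volatile unsigned long long *)(p) = (v))
    #define READ_ONCE_U64(p) \
            (*(volatile unsigned long long *)(p))
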
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 1163e8173e5a..16ef31b87452 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -238,7 +238,9 @@ module_param(nested, int, S_IRUGO);
 
 /* enable / disable AVIC */
 static int avic;
+#ifdef CONFIG_X86_LOCAL_APIC
 module_param(avic, int, S_IRUGO);
+#endif
 
 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 static void svm_flush_tlb(struct kvm_vcpu *vcpu);
@@ -981,11 +983,14 @@ static __init int svm_hardware_setup(void)
         } else
                 kvm_disable_tdp();
 
-        if (avic && (!npt_enabled || !boot_cpu_has(X86_FEATURE_AVIC)))
-                avic = false;
-
-        if (avic)
-                pr_info("AVIC enabled\n");
+        if (avic) {
+                if (!npt_enabled ||
+                    !boot_cpu_has(X86_FEATURE_AVIC) ||
+                    !IS_ENABLED(CONFIG_X86_LOCAL_APIC))
+                        avic = false;
+                else
+                        pr_info("AVIC enabled\n");
+        }
 
         return 0;
 
@@ -1324,7 +1329,7 @@ free_avic:
 static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
 {
         u64 entry;
-        int h_physical_id = __default_cpu_present_to_apicid(vcpu->cpu);
+        int h_physical_id = kvm_cpu_get_apicid(vcpu->cpu);
         struct vcpu_svm *svm = to_svm(vcpu);
 
         if (!kvm_vcpu_apicv_active(vcpu))
@@ -1349,7 +1354,7 @@ static void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
         u64 entry;
         /* ID = 0xff (broadcast), ID > 0xff (reserved) */
-        int h_physical_id = __default_cpu_present_to_apicid(cpu);
+        int h_physical_id = kvm_cpu_get_apicid(cpu);
         struct vcpu_svm *svm = to_svm(vcpu);
 
         if (!kvm_vcpu_apicv_active(vcpu))
@@ -4236,7 +4241,7 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
 
         if (avic_vcpu_is_running(vcpu))
                 wrmsrl(SVM_AVIC_DOORBELL,
-                       __default_cpu_present_to_apicid(vcpu->cpu));
+                       kvm_cpu_get_apicid(vcpu->cpu));
         else
                 kvm_vcpu_wake_up(vcpu);
 }
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index fb93010beaa4..003618e324ce 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2072,7 +2072,8 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
         unsigned int dest;
 
         if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
-                !irq_remapping_cap(IRQ_POSTING_CAP))
+                !irq_remapping_cap(IRQ_POSTING_CAP) ||
+                !kvm_vcpu_apicv_active(vcpu))
                 return;
 
         do {
@@ -2180,7 +2181,8 @@ static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
         struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
 
         if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
-                !irq_remapping_cap(IRQ_POSTING_CAP))
+                !irq_remapping_cap(IRQ_POSTING_CAP) ||
+                !kvm_vcpu_apicv_active(vcpu))
                 return;
 
         /* Set SN when the vCPU is preempted */
@@ -10714,7 +10716,8 @@ static int vmx_pre_block(struct kvm_vcpu *vcpu)
         struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
 
         if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
-                !irq_remapping_cap(IRQ_POSTING_CAP))
+                !irq_remapping_cap(IRQ_POSTING_CAP) ||
+                !kvm_vcpu_apicv_active(vcpu))
                 return 0;
 
         vcpu->pre_pcpu = vcpu->cpu;
@@ -10780,7 +10783,8 @@ static void vmx_post_block(struct kvm_vcpu *vcpu)
         unsigned long flags;
 
         if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
-                !irq_remapping_cap(IRQ_POSTING_CAP))
+                !irq_remapping_cap(IRQ_POSTING_CAP) ||
+                !kvm_vcpu_apicv_active(vcpu))
                 return;
 
         do {
@@ -10833,7 +10837,8 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
         int idx, ret = -EINVAL;
 
         if (!kvm_arch_has_assigned_device(kvm) ||
-                !irq_remapping_cap(IRQ_POSTING_CAP))
+                !irq_remapping_cap(IRQ_POSTING_CAP) ||
+                !kvm_vcpu_apicv_active(kvm->vcpus[0]))
                 return 0;
 
         idx = srcu_read_lock(&kvm->irq_srcu);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c805cf494154..902d9da12392 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2314,6 +2314,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
         case MSR_AMD64_NB_CFG:
         case MSR_FAM10H_MMIO_CONF_BASE:
         case MSR_AMD64_BU_CFG2:
+        case MSR_IA32_PERF_CTL:
                 msr_info->data = 0;
                 break;
         case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
@@ -2972,6 +2973,10 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
                               | KVM_VCPUEVENT_VALID_SMM))
                 return -EINVAL;
 
+        if (events->exception.injected &&
+            (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
+                return -EINVAL;
+
         process_nmi(vcpu);
         vcpu->arch.exception.pending = events->exception.injected;
         vcpu->arch.exception.nr = events->exception.nr;
@@ -3036,6 +3041,11 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
         if (dbgregs->flags)
                 return -EINVAL;
 
+        if (dbgregs->dr6 & ~0xffffffffull)
+                return -EINVAL;
+        if (dbgregs->dr7 & ~0xffffffffull)
+                return -EINVAL;
+
         memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
         kvm_update_dr0123(vcpu);
         vcpu->arch.dr6 = dbgregs->dr6;
@@ -7815,7 +7825,7 @@ int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
 
         slot = id_to_memslot(slots, id);
         if (size) {
-                if (WARN_ON(slot->npages))
+                if (slot->npages)
                         return -EEXIST;
 
                 /*
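
The added debugreg checks reject payloads with any of bits 63:32 set: on x86-64 the upper halves of DR6 and DR7 are reserved and must be zero, so values arriving through the KVM_SET_DEBUGREGS ioctl are now validated before the rest of KVM consumes them. The predicate, spelled out:

    static int dr_upper_bits_clear(unsigned long long val)
    {
            /* bits 63:32 of DR6/DR7 must be zero */
            return (val & ~0xffffffffULL) == 0;
    }
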
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 4eb287e25043..aa0ff4b02a96 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -6,7 +6,7 @@
 #include <asm/fixmap.h>
 #include <asm/mtrr.h>
 
-#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO
+#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO
 
 #ifdef CONFIG_HIGHPTE
 #define PGALLOC_USER_GFP __GFP_HIGHMEM
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 6e7242be1c87..b226b3f497f1 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -139,7 +139,7 @@ int __init efi_alloc_page_tables(void)
         if (efi_enabled(EFI_OLD_MEMMAP))
                 return 0;
 
-        gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO;
+        gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO;
         efi_pgd = (pgd_t *)__get_free_page(gfp_mask);
         if (!efi_pgd)
                 return -ENOMEM;
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 478a2de543a5..67433714b791 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1113,7 +1113,7 @@ static void __init xen_cleanhighmap(unsigned long vaddr,
 
         /* NOTE: The loop is more greedy than the cleanup_highmap variant.
          * We include the PMD passed in on _both_ boundaries. */
-        for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PAGE_SIZE));
+        for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
                         pmd++, vaddr += PMD_SIZE) {
                 if (pmd_none(*pmd))
                         continue;
@@ -1551,41 +1551,6 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
 #endif
 }
 
-#ifdef CONFIG_X86_32
-static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
-{
-        /* If there's an existing pte, then don't allow _PAGE_RW to be set */
-        if (pte_val_ma(*ptep) & _PAGE_PRESENT)
-                pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
-                               pte_val_ma(pte));
-
-        return pte;
-}
-#else /* CONFIG_X86_64 */
-static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
-{
-        unsigned long pfn;
-
-        if (xen_feature(XENFEAT_writable_page_tables) ||
-            xen_feature(XENFEAT_auto_translated_physmap) ||
-            xen_start_info->mfn_list >= __START_KERNEL_map)
-                return pte;
-
-        /*
-         * Pages belonging to the initial p2m list mapped outside the default
-         * address range must be mapped read-only. This region contains the
-         * page tables for mapping the p2m list, too, and page tables MUST be
-         * mapped read-only.
-         */
-        pfn = pte_pfn(pte);
-        if (pfn >= xen_start_info->first_p2m_pfn &&
-            pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
-                pte = __pte_ma(pte_val_ma(pte) & ~_PAGE_RW);
-
-        return pte;
-}
-#endif /* CONFIG_X86_64 */
-
 /*
  * Init-time set_pte while constructing initial pagetables, which
  * doesn't allow RO page table pages to be remapped RW.
@@ -1600,13 +1565,37 @@ static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
  * so always write the PTE directly and rely on Xen trapping and
  * emulating any updates as necessary.
  */
-static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
+__visible pte_t xen_make_pte_init(pteval_t pte)
 {
-        if (pte_mfn(pte) != INVALID_P2M_ENTRY)
-                pte = mask_rw_pte(ptep, pte);
-        else
-                pte = __pte_ma(0);
+#ifdef CONFIG_X86_64
+        unsigned long pfn;
+
+        /*
+         * Pages belonging to the initial p2m list mapped outside the default
+         * address range must be mapped read-only. This region contains the
+         * page tables for mapping the p2m list, too, and page tables MUST be
+         * mapped read-only.
+         */
+        pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
+        if (xen_start_info->mfn_list < __START_KERNEL_map &&
+            pfn >= xen_start_info->first_p2m_pfn &&
+            pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
+                pte &= ~_PAGE_RW;
+#endif
+        pte = pte_pfn_to_mfn(pte);
+        return native_make_pte(pte);
+}
+PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init);
 
+static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
+{
+#ifdef CONFIG_X86_32
+        /* If there's an existing pte, then don't allow _PAGE_RW to be set */
+        if (pte_mfn(pte) != INVALID_P2M_ENTRY
+            && pte_val_ma(*ptep) & _PAGE_PRESENT)
+                pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
+                               pte_val_ma(pte));
+#endif
         native_set_pte(ptep, pte);
 }
 
@@ -2407,6 +2396,7 @@ static void __init xen_post_allocator_init(void)
         pv_mmu_ops.alloc_pud = xen_alloc_pud;
         pv_mmu_ops.release_pud = xen_release_pud;
 #endif
+        pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte);
 
 #ifdef CONFIG_X86_64
         pv_mmu_ops.write_cr3 = &xen_write_cr3;
@@ -2455,7 +2445,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
         .pte_val = PV_CALLEE_SAVE(xen_pte_val),
         .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
 
-        .make_pte = PV_CALLEE_SAVE(xen_make_pte),
+        .make_pte = PV_CALLEE_SAVE(xen_make_pte_init),
         .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
 
 #ifdef CONFIG_X86_PAE
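
The xen_cleanhighmap change is an out-of-bounds fix hiding in one identifier: pmd++ advances the pointer by whole pmd_t entries, so level2_kernel_pgt + PAGE_SIZE meant 4096 entries rather than 4096 bytes, against a table that only has PTRS_PER_PMD entries. The arithmetic, with the constants mimicked in user space:

    #include <stdio.h>

    typedef unsigned long long pmd_t;   /* 8-byte entries, as on x86-64 */
    #define PAGE_SIZE    4096
    #define PTRS_PER_PMD 512

    int main(void)
    {
            unsigned long long excess = PAGE_SIZE - PTRS_PER_PMD;

            printf("old bound overshoots by %llu entries (%llu bytes)\n",
                   excess, excess * (unsigned long long)sizeof(pmd_t));
            return 0;
    }

That is 3584 entries, 28672 bytes, past the end of the table.
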
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index cab9f766bb06..dd2a49a8aacc 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -182,7 +182,7 @@ static void * __ref alloc_p2m_page(void)
         if (unlikely(!slab_is_available()))
                 return alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);
 
-        return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
+        return (void *)__get_free_page(GFP_KERNEL);
 }
 
 static void __ref free_p2m_page(void *p)
diff --git a/arch/xtensa/include/asm/pgalloc.h b/arch/xtensa/include/asm/pgalloc.h
index d38eb9237e64..1065bc8bcae5 100644
--- a/arch/xtensa/include/asm/pgalloc.h
+++ b/arch/xtensa/include/asm/pgalloc.h
@@ -44,7 +44,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
         pte_t *ptep;
         int i;
 
-        ptep = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+        ptep = (pte_t *)__get_free_page(GFP_KERNEL);
         if (!ptep)
                 return NULL;
         for (i = 0; i < 1024; i++)
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 23d7f301a196..9e29dc351695 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -113,6 +113,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                 ret = submit_bio_wait(type, bio);
                 if (ret == -EOPNOTSUPP)
                         ret = 0;
+                bio_put(bio);
         }
         blk_finish_plug(&plug);
 
@@ -165,8 +166,10 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                 }
         }
 
-        if (bio)
+        if (bio) {
                 ret = submit_bio_wait(REQ_WRITE | REQ_WRITE_SAME, bio);
+                bio_put(bio);
+        }
         return ret != -EOPNOTSUPP ? ret : 0;
 }
 EXPORT_SYMBOL(blkdev_issue_write_same);
@@ -206,8 +209,11 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                 }
         }
 
-        if (bio)
-                return submit_bio_wait(WRITE, bio);
+        if (bio) {
+                ret = submit_bio_wait(WRITE, bio);
+                bio_put(bio);
+                return ret;
+        }
         return 0;
 }
 
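
All three blk-lib hunks fix the same leak: submit_bio_wait() sleeps until the I/O completes but does not drop the submitter's reference, so each synchronously submitted bio must be released with bio_put() afterwards. The corrected pattern, sketched as a fragment with allocation details elided:

    /* caller holds a reference from bio_alloc() */
    ret = submit_bio_wait(rw, bio);   /* waits for completion only */
    bio_put(bio);                     /* drop the caller's reference */
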
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 29cbc1b5fbdb..f9b9049b1284 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1262,12 +1262,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
         blk_queue_split(q, &bio, q->bio_split);
 
-        if (!is_flush_fua && !blk_queue_nomerges(q)) {
-                if (blk_attempt_plug_merge(q, bio, &request_count,
-                                           &same_queue_rq))
-                        return BLK_QC_T_NONE;
-        } else
-                request_count = blk_plug_queued_count(q);
+        if (!is_flush_fua && !blk_queue_nomerges(q) &&
+            blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
+                return BLK_QC_T_NONE;
 
         rq = blk_mq_map_request(q, bio, &data);
         if (unlikely(!rq))
@@ -1358,9 +1355,11 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 
         blk_queue_split(q, &bio, q->bio_split);
 
-        if (!is_flush_fua && !blk_queue_nomerges(q) &&
-            blk_attempt_plug_merge(q, bio, &request_count, NULL))
-                return BLK_QC_T_NONE;
+        if (!is_flush_fua && !blk_queue_nomerges(q)) {
+                if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
+                        return BLK_QC_T_NONE;
+        } else
+                request_count = blk_plug_queued_count(q);
 
         rq = blk_mq_map_request(q, bio, &data);
         if (unlikely(!rq))
diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig
index e28e912000a7..331f6baf2df8 100644
--- a/crypto/asymmetric_keys/Kconfig
+++ b/crypto/asymmetric_keys/Kconfig
@@ -13,6 +13,7 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE
         tristate "Asymmetric public-key crypto algorithm subtype"
         select MPILIB
         select CRYPTO_HASH_INFO
+        select CRYPTO_AKCIPHER
         help
           This option provides support for asymmetric public key type handling.
           If signature generation and/or verification are to be used,
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 0d92d0f915e9..c7ba948d253c 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -331,15 +331,6 @@ static int acpi_processor_get_info(struct acpi_device *device)
                 pr->throttling.duty_width = acpi_gbl_FADT.duty_width;
 
                 pr->pblk = object.processor.pblk_address;
-
-                /*
-                 * We don't care about error returns - we just try to mark
-                 * these reserved so that nobody else is confused into thinking
-                 * that this region might be unused..
-                 *
-                 * (In particular, allocating the IO range for Cardbus)
-                 */
-                request_region(pr->throttling.address, 6, "ACPI CPU throttle");
         }
 
         /*
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 3d5b8a099351..c1d138e128cb 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -754,7 +754,8 @@ static int acpi_video_bqc_quirk(struct acpi_video_device *device,
 }
 
 int acpi_video_get_levels(struct acpi_device *device,
-                          struct acpi_video_device_brightness **dev_br)
+                          struct acpi_video_device_brightness **dev_br,
+                          int *pmax_level)
 {
         union acpi_object *obj = NULL;
         int i, max_level = 0, count = 0, level_ac_battery = 0;
@@ -841,6 +842,8 @@ int acpi_video_get_levels(struct acpi_device *device,
 
         br->count = count;
         *dev_br = br;
+        if (pmax_level)
+                *pmax_level = max_level;
 
 out:
         kfree(obj);
@@ -869,7 +872,7 @@ acpi_video_init_brightness(struct acpi_video_device *device)
         struct acpi_video_device_brightness *br = NULL;
         int result = -EINVAL;
 
-        result = acpi_video_get_levels(device->dev, &br);
+        result = acpi_video_get_levels(device->dev, &br, &max_level);
         if (result)
                 return result;
         device->brightness = br;
@@ -1737,7 +1740,7 @@ static void acpi_video_run_bcl_for_osi(struct acpi_video_bus *video)
 
         mutex_lock(&video->device_list_lock);
         list_for_each_entry(dev, &video->video_device_list, entry) {
-                if (!acpi_video_device_lcd_query_levels(dev, &levels))
+                if (!acpi_video_device_lcd_query_levels(dev->dev->handle, &levels))
                         kfree(levels);
         }
         mutex_unlock(&video->device_list_lock);
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index a1d177d58254..21932d640a41 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -108,7 +108,9 @@ acpi_ex_add_table(u32 table_index,
 
         /* Add the table to the namespace */
 
+        acpi_ex_exit_interpreter();
         status = acpi_ns_load_table(table_index, parent_node);
+        acpi_ex_enter_interpreter();
         if (ACPI_FAILURE(status)) {
                 acpi_ut_remove_reference(obj_desc);
                 *ddb_handle = NULL;
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index 0f18dbc9a37f..3b7fb99362b6 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -83,27 +83,22 @@ acpi_hw_write_multiple(u32 value,
 static u8
 acpi_hw_get_access_bit_width(struct acpi_generic_address *reg, u8 max_bit_width)
 {
-        u64 address;
-
         if (!reg->access_width) {
+                if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+                        max_bit_width = 32;
+                }
+
                 /*
                  * Detect old register descriptors where only the bit_width field
-                 * makes senses. The target address is copied to handle possible
-                 * alignment issues.
+                 * makes senses.
                  */
-                ACPI_MOVE_64_TO_64(&address, &reg->address);
-                if (!reg->bit_offset && reg->bit_width &&
+                if (reg->bit_width < max_bit_width &&
+                    !reg->bit_offset && reg->bit_width &&
                     ACPI_IS_POWER_OF_TWO(reg->bit_width) &&
-                    ACPI_IS_ALIGNED(reg->bit_width, 8) &&
-                    ACPI_IS_ALIGNED(address, reg->bit_width)) {
+                    ACPI_IS_ALIGNED(reg->bit_width, 8)) {
                         return (reg->bit_width);
-                } else {
-                        if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
-                                return (32);
-                        } else {
-                                return (max_bit_width);
-                        }
                 }
+                return (max_bit_width);
         } else {
                 return (1 << (reg->access_width + 2));
         }
@@ -311,12 +306,6 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
 acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg)
 {
         u64 address;
-        u8 access_width;
-        u32 bit_width;
-        u8 bit_offset;
-        u64 value64;
-        u32 new_value32, old_value32;
-        u8 index;
         acpi_status status;
 
         ACPI_FUNCTION_NAME(hw_write);
@@ -328,145 +317,23 @@ acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg)
                 return (status);
         }
 
-        /* Convert access_width into number of bits based */
-
-        access_width = acpi_hw_get_access_bit_width(reg, 32);
-        bit_width = reg->bit_offset + reg->bit_width;
-        bit_offset = reg->bit_offset;
-
         /*
          * Two address spaces supported: Memory or IO. PCI_Config is
          * not supported here because the GAS structure is insufficient
          */
-        index = 0;
-        while (bit_width) {
-                /*
-                 * Use offset style bit reads because "Index * AccessWidth" is
-                 * ensured to be less than 32-bits by acpi_hw_validate_register().
-                 */
-                new_value32 = ACPI_GET_BITS(&value, index * access_width,
-                                            ACPI_MASK_BITS_ABOVE_32
-                                            (access_width));
-
-                if (bit_offset >= access_width) {
-                        bit_offset -= access_width;
-                } else {
-                        /*
-                         * Use offset style bit masks because access_width is ensured
-                         * to be less than 32-bits by acpi_hw_validate_register() and
-                         * bit_offset/bit_width is less than access_width here.
-                         */
-                        if (bit_offset) {
-                                new_value32 &= ACPI_MASK_BITS_BELOW(bit_offset);
-                        }
-                        if (bit_width < access_width) {
-                                new_value32 &= ACPI_MASK_BITS_ABOVE(bit_width);
-                        }
-
-                        if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
-                                if (bit_offset || bit_width < access_width) {
-                                        /*
-                                         * Read old values in order not to modify the bits that
-                                         * are beyond the register bit_width/bit_offset setting.
-                                         */
-                                        status =
-                                            acpi_os_read_memory((acpi_physical_address)
-                                                                address +
-                                                                index *
-                                                                ACPI_DIV_8
-                                                                (access_width),
-                                                                &value64,
-                                                                access_width);
-                                        old_value32 = (u32)value64;
-
-                                        /*
-                                         * Use offset style bit masks because access_width is
-                                         * ensured to be less than 32-bits by
-                                         * acpi_hw_validate_register() and bit_offset/bit_width is
-                                         * less than access_width here.
-                                         */
-                                        if (bit_offset) {
-                                                old_value32 &=
-                                                    ACPI_MASK_BITS_ABOVE
-                                                    (bit_offset);
-                                                bit_offset = 0;
-                                        }
-                                        if (bit_width < access_width) {
-                                                old_value32 &=
-                                                    ACPI_MASK_BITS_BELOW
-                                                    (bit_width);
-                                        }
-
-                                        new_value32 |= old_value32;
-                                }
-
-                                value64 = (u64)new_value32;
-                                status =
-                                    acpi_os_write_memory((acpi_physical_address)
-                                                         address +
-                                                         index *
-                                                         ACPI_DIV_8
-                                                         (access_width),
-                                                         value64, access_width);
-                        } else {        /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
-
-                                if (bit_offset || bit_width < access_width) {
-                                        /*
-                                         * Read old values in order not to modify the bits that
-                                         * are beyond the register bit_width/bit_offset setting.
-                                         */
-                                        status =
-                                            acpi_hw_read_port((acpi_io_address)
-                                                              address +
-                                                              index *
-                                                              ACPI_DIV_8
-                                                              (access_width),
-                                                              &old_value32,
-                                                              access_width);
-
-                                        /*
-                                         * Use offset style bit masks because access_width is
-                                         * ensured to be less than 32-bits by
-                                         * acpi_hw_validate_register() and bit_offset/bit_width is
-                                         * less than access_width here.
-                                         */
-                                        if (bit_offset) {
-                                                old_value32 &=
-                                                    ACPI_MASK_BITS_ABOVE
-                                                    (bit_offset);
-                                                bit_offset = 0;
-                                        }
-                                        if (bit_width < access_width) {
-                                                old_value32 &=
-                                                    ACPI_MASK_BITS_BELOW
-                                                    (bit_width);
-                                        }
-
-                                        new_value32 |= old_value32;
-                                }
-
-                                status = acpi_hw_write_port((acpi_io_address)
-                                                            address +
-                                                            index *
-                                                            ACPI_DIV_8
-                                                            (access_width),
-                                                            new_value32,
-                                                            access_width);
-                        }
-                }
-
-                /*
-                 * Index * access_width is ensured to be less than 32-bits by
-                 * acpi_hw_validate_register().
-                 */
-                bit_width -=
-                    bit_width > access_width ? access_width : bit_width;
-                index++;
+        if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+                status = acpi_os_write_memory((acpi_physical_address)
+                                              address, (u64)value,
+                                              reg->bit_width);
+        } else {        /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
+
+                status = acpi_hw_write_port((acpi_io_address)
+                                            address, value, reg->bit_width);
         }
 
         ACPI_DEBUG_PRINT((ACPI_DB_IO,
                           "Wrote: %8.8X width %2d to %8.8X%8.8X (%s)\n",
-                          value, access_width, ACPI_FORMAT_UINT64(address),
+                          value, reg->bit_width, ACPI_FORMAT_UINT64(address),
                           acpi_ut_get_region_name(reg->space_id)));
 
         return (status);
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index f631a47724f0..1783cd7e1446 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -47,6 +47,7 @@
 #include "acparser.h"
 #include "acdispat.h"
 #include "actables.h"
+#include "acinterp.h"
 
 #define _COMPONENT ACPI_NAMESPACE
 ACPI_MODULE_NAME("nsparse")
@@ -170,6 +171,8 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
 
         ACPI_FUNCTION_TRACE(ns_parse_table);
 
+        acpi_ex_enter_interpreter();
+
         /*
          * AML Parse, pass 1
          *
@@ -185,7 +188,7 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
         status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS1,
                                             table_index, start_node);
         if (ACPI_FAILURE(status)) {
-                return_ACPI_STATUS(status);
+                goto error_exit;
         }
 
         /*
@@ -201,8 +204,10 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
         status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS2,
                                             table_index, start_node);
         if (ACPI_FAILURE(status)) {
-                return_ACPI_STATUS(status);
+                goto error_exit;
         }
 
+error_exit:
+        acpi_ex_exit_interpreter();
         return_ACPI_STATUS(status);
 }
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 31e8da648fff..262ca31b86d9 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1051,7 +1051,7 @@ static int __init acpi_bus_init(void)
          * Maybe EC region is required at bus_scan/acpi_get_devices. So it
          * is necessary to enable it as early as possible.
          */
-        acpi_boot_ec_enable();
+        acpi_ec_dsdt_probe();
 
         printk(KERN_INFO PREFIX "Interpreter enabled\n");
 
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 0e70181f150c..73c76d646064 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1446,10 +1446,30 @@ ec_parse_io_ports(struct acpi_resource *resource, void *context)
         return AE_OK;
 }
 
-int __init acpi_boot_ec_enable(void)
+static const struct acpi_device_id ec_device_ids[] = {
+        {"PNP0C09", 0},
+        {"", 0},
+};
+
+int __init acpi_ec_dsdt_probe(void)
 {
-        if (!boot_ec)
+        acpi_status status;
+
+        if (boot_ec)
                 return 0;
+
+        /*
+         * Finding EC from DSDT if there is no ECDT EC available. When this
+         * function is invoked, ACPI tables have been fully loaded, we can
+         * walk namespace now.
+         */
+        boot_ec = make_acpi_ec();
+        if (!boot_ec)
+                return -ENOMEM;
+        status = acpi_get_devices(ec_device_ids[0].id,
+                                  ec_parse_device, boot_ec, NULL);
+        if (ACPI_FAILURE(status) || !boot_ec->handle)
+                return -ENODEV;
         if (!ec_install_handlers(boot_ec)) {
                 first_ec = boot_ec;
                 return 0;
@@ -1457,11 +1477,6 @@ int __init acpi_boot_ec_enable(void)
         return -EFAULT;
 }
 
-static const struct acpi_device_id ec_device_ids[] = {
-        {"PNP0C09", 0},
-        {"", 0},
-};
-
 #if 0
 /*
  * Some EC firmware variations refuses to respond QR_EC when SCI_EVT is not
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 9bb0773d39bf..27cc7feabfe4 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -181,7 +181,7 @@ typedef int (*acpi_ec_query_func) (void *data);
 
 int acpi_ec_init(void);
 int acpi_ec_ecdt_probe(void);
-int acpi_boot_ec_enable(void);
+int acpi_ec_dsdt_probe(void);
 void acpi_ec_block_transactions(void);
 void acpi_ec_unblock_transactions(void);
 void acpi_ec_unblock_transactions_early(void);
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index f170d746336d..c72e64893d03 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -676,6 +676,15 @@ static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
         if (!pr->flags.throttling)
                 return -ENODEV;
 
+        /*
+         * We don't care about error returns - we just try to mark
+         * these reserved so that nobody else is confused into thinking
+         * that this region might be unused..
+         *
+         * (In particular, allocating the IO range for Cardbus)
+         */
+        request_region(pr->throttling.address, 6, "ACPI CPU throttle");
+
         pr->throttling.state = 0;
 
         duty_mask = pr->throttling.state_count - 1;
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 61dc7a99e89a..c6f017458958 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -606,7 +606,7 @@ void ata_scsi_error(struct Scsi_Host *host)
                 ata_scsi_port_error_handler(host, ap);
 
         /* finish or retry handled scmd's and clean up */
-        WARN_ON(host->host_failed || !list_empty(&eh_work_q));
+        WARN_ON(!list_empty(&eh_work_q));
 
         DPRINTK("EXIT\n");
 }
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index a969a7e443be..85aaf2222587 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -181,13 +181,17 @@ static char *res_strings[] = {
         "reserved 27",
         "reserved 28",
         "reserved 29",
-        "reserved 30",
+        "reserved 30", /* FIXME: The strings between 30-40 might be wrong. */
         "reassembly abort: no buffers",
         "receive buffer overflow",
         "change in GFC",
         "receive buffer full",
         "low priority discard - no receive descriptor",
         "low priority discard - missing end of packet",
+        "reserved 37",
+        "reserved 38",
+        "reserved 39",
+        "reseverd 40",
         "reserved 41",
         "reserved 42",
         "reserved 43",
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 7d00f2994738..809dd1e02091 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -1128,7 +1128,7 @@ static int rx_pkt(struct atm_dev *dev)
         /* make the ptr point to the corresponding buffer desc entry */
         buf_desc_ptr += desc;
         if (!desc || (desc > iadev->num_rx_desc) ||
-            ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) {
+            ((buf_desc_ptr->vc_index & 0xffff) >= iadev->num_vc)) {
                 free_desc(dev, desc);
                 IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
                 return -1;
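
The iphase change is an off-by-one in a bounds check: vc_index addresses a table with num_vc entries, so valid indices run from 0 to num_vc - 1 and the boundary value itself has to be rejected. As a predicate:

    static int vc_index_ok(unsigned int idx, unsigned int num_vc)
    {
            /* the old '>' comparison wrongly accepted idx == num_vc,
               one entry past the end of the table */
            return idx < num_vc;
    }
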
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 6b2a84e7f2be..2609ba20b396 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_DMA_CMA) += dma-contiguous.o
 obj-y += power/
 obj-$(CONFIG_HAS_DMA) += dma-mapping.o
 obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
-obj-$(CONFIG_ISA) += isa.o
+obj-$(CONFIG_ISA_BUS_API) += isa.o
 obj-$(CONFIG_FW_LOADER) += firmware_class.o
 obj-$(CONFIG_NUMA) += node.o
 obj-$(CONFIG_MEMORY_HOTPLUG_SPARSE) += memory.o
diff --git a/drivers/base/isa.c b/drivers/base/isa.c
index 91dba65d7264..cd6ccdcf9df0 100644
--- a/drivers/base/isa.c
+++ b/drivers/base/isa.c
@@ -180,4 +180,4 @@ static int __init isa_bus_init(void)
         return error;
 }
 
-device_initcall(isa_bus_init);
+postcore_initcall(isa_bus_init);
diff --git a/drivers/base/module.c b/drivers/base/module.c
index db930d3ee312..2a215780eda2 100644
--- a/drivers/base/module.c
+++ b/drivers/base/module.c
@@ -24,10 +24,12 @@ static char *make_driver_name(struct device_driver *drv)
 
 static void module_create_drivers_dir(struct module_kobject *mk)
 {
-        if (!mk || mk->drivers_dir)
-                return;
+        static DEFINE_MUTEX(drivers_dir_mutex);
 
-        mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
+        mutex_lock(&drivers_dir_mutex);
+        if (mk && !mk->drivers_dir)
+                mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
+        mutex_unlock(&drivers_dir_mutex);
 }
 
 void module_add_driver(struct module *mod, struct device_driver *drv)
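
module_create_drivers_dir() can run concurrently when several drivers register at once, and the old check-then-create let two callers both observe drivers_dir == NULL and race to create the same sysfs directory; the local static mutex makes the sequence atomic. The shape of the fix in portable pthreads (make_dir() is a hypothetical stand-in for kobject_create_and_add()):

    #include <pthread.h>

    static pthread_mutex_t dir_lock = PTHREAD_MUTEX_INITIALIZER;
    static void *drivers_dir;                 /* created at most once */

    extern void *make_dir(const char *name);  /* hypothetical helper */

    static void create_dir_once(void)
    {
            pthread_mutex_lock(&dir_lock);
            if (!drivers_dir)
                    drivers_dir = make_dir("drivers");
            pthread_mutex_unlock(&dir_lock);
    }
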
diff --git a/drivers/base/power/opp/cpu.c b/drivers/base/power/opp/cpu.c
index 83d6e7ba1a34..8c3434bdb26d 100644
--- a/drivers/base/power/opp/cpu.c
+++ b/drivers/base/power/opp/cpu.c
@@ -211,7 +211,7 @@ int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev,
                 }
 
                 /* Mark opp-table as multiple CPUs are sharing it now */
-                opp_table->shared_opp = true;
+                opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
         }
 unlock:
         mutex_unlock(&opp_table_lock);
@@ -227,7 +227,8 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus);
  *
  * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
  *
- * Returns -ENODEV if OPP table isn't already present.
+ * Returns -ENODEV if OPP table isn't already present and -EINVAL if the OPP
+ * table's status is access-unknown.
  *
  * Locking: The internal opp_table and opp structures are RCU protected.
  * Hence this function internally uses RCU updater strategy with mutex locks
@@ -249,9 +250,14 @@ int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
                 goto unlock;
         }
 
+        if (opp_table->shared_opp == OPP_TABLE_ACCESS_UNKNOWN) {
+                ret = -EINVAL;
+                goto unlock;
+        }
+
         cpumask_clear(cpumask);
 
-        if (opp_table->shared_opp) {
+        if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) {
                 list_for_each_entry(opp_dev, &opp_table->dev_list, node)
                         cpumask_set_cpu(opp_dev->dev->id, cpumask);
         } else {
diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c
index 94d2010558e3..1dfd3dd92624 100644
--- a/drivers/base/power/opp/of.c
+++ b/drivers/base/power/opp/of.c
@@ -34,7 +34,10 @@ static struct opp_table *_managed_opp(const struct device_node *np)
                          * But the OPPs will be considered as shared only if the
                          * OPP table contains a "opp-shared" property.
                          */
-                        return opp_table->shared_opp ? opp_table : NULL;
+                        if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED)
+                                return opp_table;
+
+                        return NULL;
                 }
         }
 
@@ -353,7 +356,10 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
         }
 
         opp_table->np = opp_np;
-        opp_table->shared_opp = of_property_read_bool(opp_np, "opp-shared");
+        if (of_property_read_bool(opp_np, "opp-shared"))
+                opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
+        else
+                opp_table->shared_opp = OPP_TABLE_ACCESS_EXCLUSIVE;
 
         mutex_unlock(&opp_table_lock);
 
diff --git a/drivers/base/power/opp/opp.h b/drivers/base/power/opp/opp.h
index 20f3be22e060..fabd5ca1a083 100644
--- a/drivers/base/power/opp/opp.h
+++ b/drivers/base/power/opp/opp.h
@@ -119,6 +119,12 @@ struct opp_device {
119#endif 119#endif
120}; 120};
121 121
122enum opp_table_access {
123 OPP_TABLE_ACCESS_UNKNOWN = 0,
124 OPP_TABLE_ACCESS_EXCLUSIVE = 1,
125 OPP_TABLE_ACCESS_SHARED = 2,
126};
127
122/** 128/**
123 * struct opp_table - Device opp structure 129 * struct opp_table - Device opp structure
124 * @node: table node - contains the devices with OPPs that 130 * @node: table node - contains the devices with OPPs that
@@ -166,7 +172,7 @@ struct opp_table {
166 /* For backward compatibility with v1 bindings */ 172 /* For backward compatibility with v1 bindings */
167 unsigned int voltage_tolerance_v1; 173 unsigned int voltage_tolerance_v1;
168 174
169 bool shared_opp; 175 enum opp_table_access shared_opp;
170 struct dev_pm_opp *suspend_opp; 176 struct dev_pm_opp *suspend_opp;
171 177
172 unsigned int *supported_hw; 178 unsigned int *supported_hw;
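Making OPP_TABLE_ACCESS_UNKNOWN the zero value means a freshly kzalloc()'d opp_table starts out access-unknown rather than implicitly exclusive, which is what lets dev_pm_opp_get_sharing_cpus() reject tables whose sharing policy was never decided. A compilable sketch of that idea, with hypothetical names:

    #include <stdio.h>
    #include <string.h>

    enum table_access {
        TABLE_ACCESS_UNKNOWN = 0,   /* kzalloc()/memset(0) yields this state */
        TABLE_ACCESS_EXCLUSIVE = 1,
        TABLE_ACCESS_SHARED = 2,
    };

    struct table { enum table_access shared; };

    int main(void)
    {
        struct table t;

        memset(&t, 0, sizeof(t));   /* models kzalloc() */
        printf("fresh table is %s\n",
               t.shared == TABLE_ACCESS_UNKNOWN ? "access-unknown" : "decided");
        return 0;
    }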
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index d597e432e195..ab19adb07a12 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -1750,7 +1750,7 @@ aoecmd_init(void)
1750 int ret; 1750 int ret;
1751 1751
1752 /* get_zeroed_page returns page with ref count 1 */ 1752 /* get_zeroed_page returns page with ref count 1 */
1753 p = (void *) get_zeroed_page(GFP_KERNEL | __GFP_REPEAT); 1753 p = (void *) get_zeroed_page(GFP_KERNEL);
1754 if (!p) 1754 if (!p)
1755 return -ENOMEM; 1755 return -ENOMEM;
1756 empty_page = virt_to_page(p); 1756 empty_page = virt_to_page(p);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 31e73a7a40f2..6a48ed41963f 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -941,7 +941,7 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd)
941 debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize); 941 debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
942 debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout); 942 debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout);
943 debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize); 943 debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize);
944 debugfs_create_file("flags", 0444, dir, &nbd, &nbd_dbg_flags_ops); 944 debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);
945 945
946 return 0; 946 return 0;
947} 947}
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index ca13df854639..2e6d1e9c3345 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -874,8 +874,12 @@ static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
874 const struct blk_mq_queue_data *qd) 874 const struct blk_mq_queue_data *qd)
875{ 875{
876 unsigned long flags; 876 unsigned long flags;
877 struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)hctx->driver_data; 877 int qid = hctx->queue_num;
878 struct blkfront_info *info = hctx->queue->queuedata;
879 struct blkfront_ring_info *rinfo = NULL;
878 880
881 BUG_ON(info->nr_rings <= qid);
882 rinfo = &info->rinfo[qid];
879 blk_mq_start_request(qd->rq); 883 blk_mq_start_request(qd->rq);
880 spin_lock_irqsave(&rinfo->ring_lock, flags); 884 spin_lock_irqsave(&rinfo->ring_lock, flags);
881 if (RING_FULL(&rinfo->ring)) 885 if (RING_FULL(&rinfo->ring))
@@ -901,20 +905,9 @@ out_busy:
901 return BLK_MQ_RQ_QUEUE_BUSY; 905 return BLK_MQ_RQ_QUEUE_BUSY;
902} 906}
903 907
904static int blk_mq_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
905 unsigned int index)
906{
907 struct blkfront_info *info = (struct blkfront_info *)data;
908
909 BUG_ON(info->nr_rings <= index);
910 hctx->driver_data = &info->rinfo[index];
911 return 0;
912}
913
914static struct blk_mq_ops blkfront_mq_ops = { 908static struct blk_mq_ops blkfront_mq_ops = {
915 .queue_rq = blkif_queue_rq, 909 .queue_rq = blkif_queue_rq,
916 .map_queue = blk_mq_map_queue, 910 .map_queue = blk_mq_map_queue,
917 .init_hctx = blk_mq_init_hctx,
918}; 911};
919 912
920static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size, 913static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
@@ -950,6 +943,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
950 return PTR_ERR(rq); 943 return PTR_ERR(rq);
951 } 944 }
952 945
946 rq->queuedata = info;
953 queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq); 947 queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
954 948
955 if (info->feature_discard) { 949 if (info->feature_discard) {
@@ -2149,6 +2143,8 @@ static int blkfront_resume(struct xenbus_device *dev)
2149 return err; 2143 return err;
2150 2144
2151 err = talk_to_blkback(dev, info); 2145 err = talk_to_blkback(dev, info);
2146 if (!err)
2147 blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);
2152 2148
2153 /* 2149 /*
2154 * We have to wait for the backend to switch to 2150 * We have to wait for the backend to switch to
@@ -2485,10 +2481,23 @@ static void blkback_changed(struct xenbus_device *dev,
2485 break; 2481 break;
2486 2482
2487 case XenbusStateConnected: 2483 case XenbusStateConnected:
2488 if (dev->state != XenbusStateInitialised) { 2484 /*
2485 * talk_to_blkback sets state to XenbusStateInitialised
2486 * and blkfront_connect sets it to XenbusStateConnected
2487 * (if connection went OK).
2488 *
2489 * If the backend (or toolstack) decides to poke at backend
2490 * state (and re-trigger the watch by setting the state repeatedly
(placeholder - removed)
2491 * to XenbusStateConnected (4)) we need to deal with this.
2492 * This is allowed, as it is how the backend tells the guest
2493 * that the size of the disk has changed!
2494 */
2495 if ((dev->state != XenbusStateInitialised) &&
2496 (dev->state != XenbusStateConnected)) {
2489 if (talk_to_blkback(dev, info)) 2497 if (talk_to_blkback(dev, info))
2490 break; 2498 break;
2491 } 2499 }
2500
2492 blkfront_connect(info); 2501 blkfront_connect(info);
2493 break; 2502 break;
2494 2503
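With the init_hctx() callback gone, blkif_queue_rq() derives the ring from hctx->queue_num and a back-pointer stored in the request queue, which stays correct after blk_mq_update_nr_hw_queues() changes the ring count on resume. A compilable sketch of the lookup, with hypothetical stand-in types:

    #include <assert.h>
    #include <stdio.h>

    struct ring_info { int id; };
    struct dev_info  { int nr_rings; struct ring_info *rinfo; };

    static struct ring_info *ring_for_queue(struct dev_info *info, int queue_num)
    {
        assert(info->nr_rings > queue_num);  /* models the BUG_ON() */
        return &info->rinfo[queue_num];
    }

    int main(void)
    {
        struct ring_info rings[2] = { { 0 }, { 1 } };
        struct dev_info info = { 2, rings };

        printf("queue 1 -> ring %d\n", ring_for_queue(&info, 1)->id);
        return 0;
    }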
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index aef87fdbd187..44311296ec02 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -840,6 +840,14 @@ static bool i830_check_flags(unsigned int flags)
840 return false; 840 return false;
841} 841}
842 842
843void intel_gtt_insert_page(dma_addr_t addr,
844 unsigned int pg,
845 unsigned int flags)
846{
847 intel_private.driver->write_entry(addr, pg, flags);
848}
849EXPORT_SYMBOL(intel_gtt_insert_page);
850
843void intel_gtt_insert_sg_entries(struct sg_table *st, 851void intel_gtt_insert_sg_entries(struct sg_table *st,
844 unsigned int pg_start, 852 unsigned int pg_start,
845 unsigned int flags) 853 unsigned int flags)
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 94fb407d8561..44b1bd6baa38 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -3820,6 +3820,7 @@ static void handle_new_recv_msgs(ipmi_smi_t intf)
3820 while (!list_empty(&intf->waiting_rcv_msgs)) { 3820 while (!list_empty(&intf->waiting_rcv_msgs)) {
3821 smi_msg = list_entry(intf->waiting_rcv_msgs.next, 3821 smi_msg = list_entry(intf->waiting_rcv_msgs.next,
3822 struct ipmi_smi_msg, link); 3822 struct ipmi_smi_msg, link);
3823 list_del(&smi_msg->link);
3823 if (!run_to_completion) 3824 if (!run_to_completion)
3824 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, 3825 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
3825 flags); 3826 flags);
@@ -3829,11 +3830,14 @@ static void handle_new_recv_msgs(ipmi_smi_t intf)
3829 if (rv > 0) { 3830 if (rv > 0) {
3830 /* 3831 /*
3831 * To preserve message order, quit if we 3832 * To preserve message order, quit if we
3832 * can't handle a message. 3833 * can't handle a message. Add the message
3834 * back at the head; this is safe because this
3835 * tasklet is the only thing that pulls the
3836 * messages.
3833 */ 3837 */
3838 list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
3834 break; 3839 break;
3835 } else { 3840 } else {
3836 list_del(&smi_msg->link);
3837 if (rv == 0) 3841 if (rv == 0)
3838 /* Message handled */ 3842 /* Message handled */
3839 ipmi_free_smi_msg(smi_msg); 3843 ipmi_free_smi_msg(smi_msg);
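The message is now unlinked before the lock is dropped and re-queued at the head only when the handler reports it cannot take it yet; since this tasklet is the only consumer, ordering is preserved. A small self-contained model of that dequeue/requeue-at-head discipline (hypothetical names, a ring buffer standing in for the kernel list):

    #include <stdio.h>

    #define QLEN 4
    static int queue[QLEN] = { 1, 2, 3, 4 };
    static int head, count = QLEN;

    static int handle(int msg) { return msg == 3 ? -1 : 0; } /* -1: can't handle yet */

    int main(void)
    {
        while (count) {
            int msg = queue[head];              /* unlink from the head */
            head = (head + 1) % QLEN;
            count--;
            if (handle(msg) < 0) {              /* busy: put it back at the head */
                head = (head + QLEN - 1) % QLEN;
                count++;
                break;                          /* quit to preserve ordering */
            }
            printf("handled %d\n", msg);
        }
        return 0;
    }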
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 53ddba26578c..98efbfcdb503 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -175,6 +175,7 @@ config COMMON_CLK_KEYSTONE
175config COMMON_CLK_NXP 175config COMMON_CLK_NXP
176 def_bool COMMON_CLK && (ARCH_LPC18XX || ARCH_LPC32XX) 176 def_bool COMMON_CLK && (ARCH_LPC18XX || ARCH_LPC32XX)
177 select REGMAP_MMIO if ARCH_LPC32XX 177 select REGMAP_MMIO if ARCH_LPC32XX
178 select MFD_SYSCON if ARCH_LPC18XX
178 ---help--- 179 ---help---
179 Support for clock providers on NXP platforms. 180 Support for clock providers on NXP platforms.
180 181
diff --git a/drivers/clk/microchip/clk-pic32mzda.c b/drivers/clk/microchip/clk-pic32mzda.c
index 020a29acc5b0..51f54380474b 100644
--- a/drivers/clk/microchip/clk-pic32mzda.c
+++ b/drivers/clk/microchip/clk-pic32mzda.c
@@ -180,15 +180,15 @@ static int pic32mzda_clk_probe(struct platform_device *pdev)
180 180
181 /* register fixed rate clocks */ 181 /* register fixed rate clocks */
182 clks[POSCCLK] = clk_register_fixed_rate(&pdev->dev, "posc_clk", NULL, 182 clks[POSCCLK] = clk_register_fixed_rate(&pdev->dev, "posc_clk", NULL,
183 CLK_IS_ROOT, 24000000); 183 0, 24000000);
184 clks[FRCCLK] = clk_register_fixed_rate(&pdev->dev, "frc_clk", NULL, 184 clks[FRCCLK] = clk_register_fixed_rate(&pdev->dev, "frc_clk", NULL,
185 CLK_IS_ROOT, 8000000); 185 0, 8000000);
186 clks[BFRCCLK] = clk_register_fixed_rate(&pdev->dev, "bfrc_clk", NULL, 186 clks[BFRCCLK] = clk_register_fixed_rate(&pdev->dev, "bfrc_clk", NULL,
187 CLK_IS_ROOT, 8000000); 187 0, 8000000);
188 clks[LPRCCLK] = clk_register_fixed_rate(&pdev->dev, "lprc_clk", NULL, 188 clks[LPRCCLK] = clk_register_fixed_rate(&pdev->dev, "lprc_clk", NULL,
189 CLK_IS_ROOT, 32000); 189 0, 32000);
190 clks[UPLLCLK] = clk_register_fixed_rate(&pdev->dev, "usbphy_clk", NULL, 190 clks[UPLLCLK] = clk_register_fixed_rate(&pdev->dev, "usbphy_clk", NULL,
191 CLK_IS_ROOT, 24000000); 191 0, 24000000);
192 /* fixed rate (optional) clock */ 192 /* fixed rate (optional) clock */
193 if (of_find_property(np, "microchip,pic32mzda-sosc", NULL)) { 193 if (of_find_property(np, "microchip,pic32mzda-sosc", NULL)) {
194 pr_info("pic32-clk: dt requests SOSC.\n"); 194 pr_info("pic32-clk: dt requests SOSC.\n");
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 36bc11a106aa..9009295f5134 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1832,7 +1832,7 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
1832unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy, 1832unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
1833 unsigned int target_freq) 1833 unsigned int target_freq)
1834{ 1834{
1835 clamp_val(target_freq, policy->min, policy->max); 1835 target_freq = clamp_val(target_freq, policy->min, policy->max);
1836 1836
1837 return cpufreq_driver->fast_switch(policy, target_freq); 1837 return cpufreq_driver->fast_switch(policy, target_freq);
1838} 1838}
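clamp_val() is an expression that returns the clamped value; it does not modify its argument, so the unassigned call was a no-op. A compilable illustration of the bug class (clamp_val re-implemented here in simplified form, without the kernel's type checking):

    #include <stdio.h>

    #define clamp_val(val, lo, hi) \
        ((val) < (lo) ? (lo) : (val) > (hi) ? (hi) : (val))

    int main(void)
    {
        unsigned int target = 5000000, min = 800000, max = 3000000;

        clamp_val(target, min, max);           /* result discarded: target unchanged */
        printf("buggy: %u\n", target);

        target = clamp_val(target, min, max);  /* the fix: assign the result */
        printf("fixed: %u\n", target);
        return 0;
    }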
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 3a9c4325d6e2..fe9dc17ea873 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -372,26 +372,9 @@ static bool intel_pstate_get_ppc_enable_status(void)
372 return acpi_ppc; 372 return acpi_ppc;
373} 373}
374 374
375/*
376 * The max target pstate ratio is a 8 bit value in both PLATFORM_INFO MSR and
377 * in TURBO_RATIO_LIMIT MSR, which pstate driver stores in max_pstate and
378 * max_turbo_pstate fields. The PERF_CTL MSR contains 16 bit value for P state
379 * ratio, out of it only high 8 bits are used. For example 0x1700 is setting
380 * target ratio 0x17. The _PSS control value stores in a format which can be
381 * directly written to PERF_CTL MSR. But in intel_pstate driver this shift
382 * occurs during write to PERF_CTL (E.g. for cores core_set_pstate()).
383 * This function converts the _PSS control value to intel pstate driver format
384 * for comparison and assignment.
385 */
386static int convert_to_native_pstate_format(struct cpudata *cpu, int index)
387{
388 return cpu->acpi_perf_data.states[index].control >> 8;
389}
390
391static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) 375static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
392{ 376{
393 struct cpudata *cpu; 377 struct cpudata *cpu;
394 int turbo_pss_ctl;
395 int ret; 378 int ret;
396 int i; 379 int i;
397 380
@@ -441,15 +424,14 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
441 * max frequency, which will cause a reduced performance as 424 * max frequency, which will cause a reduced performance as
442 * this driver uses real max turbo frequency as the max 425 * this driver uses real max turbo frequency as the max
443 * frequency. So correct this frequency in _PSS table to 426 * frequency. So correct this frequency in _PSS table to
444 * correct max turbo frequency based on the turbo ratio. 427 * correct max turbo frequency based on the turbo state.
445 * Also need to convert to MHz as _PSS freq is in MHz. 428 * Also need to convert to MHz as _PSS freq is in MHz.
446 */ 429 */
447 turbo_pss_ctl = convert_to_native_pstate_format(cpu, 0); 430 if (!limits->turbo_disabled)
448 if (turbo_pss_ctl > cpu->pstate.max_pstate)
449 cpu->acpi_perf_data.states[0].core_frequency = 431 cpu->acpi_perf_data.states[0].core_frequency =
450 policy->cpuinfo.max_freq / 1000; 432 policy->cpuinfo.max_freq / 1000;
451 cpu->valid_pss_table = true; 433 cpu->valid_pss_table = true;
452 pr_info("_PPC limits will be enforced\n"); 434 pr_debug("_PPC limits will be enforced\n");
453 435
454 return; 436 return;
455 437
@@ -1460,6 +1442,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
1460 1442
1461 intel_pstate_clear_update_util_hook(policy->cpu); 1443 intel_pstate_clear_update_util_hook(policy->cpu);
1462 1444
1445 pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
1446 policy->cpuinfo.max_freq, policy->max);
1447
1463 cpu = all_cpu_data[0]; 1448 cpu = all_cpu_data[0];
1464 if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate && 1449 if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
1465 policy->max < policy->cpuinfo.max_freq && 1450 policy->max < policy->cpuinfo.max_freq &&
@@ -1495,13 +1480,13 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
1495 limits->max_sysfs_pct); 1480 limits->max_sysfs_pct);
1496 limits->max_perf_pct = max(limits->min_policy_pct, 1481 limits->max_perf_pct = max(limits->min_policy_pct,
1497 limits->max_perf_pct); 1482 limits->max_perf_pct);
1498 limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
1499 1483
1500 /* Make sure min_perf_pct <= max_perf_pct */ 1484 /* Make sure min_perf_pct <= max_perf_pct */
1501 limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct); 1485 limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
1502 1486
1503 limits->min_perf = div_fp(limits->min_perf_pct, 100); 1487 limits->min_perf = div_fp(limits->min_perf_pct, 100);
1504 limits->max_perf = div_fp(limits->max_perf_pct, 100); 1488 limits->max_perf = div_fp(limits->max_perf_pct, 100);
1489 limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
1505 1490
1506 out: 1491 out:
1507 intel_pstate_set_update_util_hook(policy->cpu); 1492 intel_pstate_set_update_util_hook(policy->cpu);
@@ -1558,8 +1543,11 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
1558 1543
1559 /* cpuinfo and default policy values */ 1544 /* cpuinfo and default policy values */
1560 policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling; 1545 policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
1561 policy->cpuinfo.max_freq = 1546 update_turbo_state();
1562 cpu->pstate.turbo_pstate * cpu->pstate.scaling; 1547 policy->cpuinfo.max_freq = limits->turbo_disabled ?
1548 cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
1549 policy->cpuinfo.max_freq *= cpu->pstate.scaling;
1550
1563 intel_pstate_init_acpi_perf_limits(policy); 1551 intel_pstate_init_acpi_perf_limits(policy);
1564 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; 1552 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
1565 cpumask_set_cpu(policy->cpu, policy->cpus); 1553 cpumask_set_cpu(policy->cpu, policy->cpus);
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 808a320e9d5d..a7ecb9a84c15 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -487,7 +487,7 @@ static int __init pcc_cpufreq_probe(void)
487 doorbell.space_id = reg_resource->space_id; 487 doorbell.space_id = reg_resource->space_id;
488 doorbell.bit_width = reg_resource->bit_width; 488 doorbell.bit_width = reg_resource->bit_width;
489 doorbell.bit_offset = reg_resource->bit_offset; 489 doorbell.bit_offset = reg_resource->bit_offset;
490 doorbell.access_width = 64; 490 doorbell.access_width = 4;
491 doorbell.address = reg_resource->address; 491 doorbell.address = reg_resource->address;
492 492
493 pr_debug("probe: doorbell: space_id is %d, bit_width is %d, " 493 pr_debug("probe: doorbell: space_id is %d, bit_width is %d, "
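access_width in an ACPI Generic Address Structure is an enumerated access size, not a width in bits; as far as the ACPI spec's encoding goes (stated here from memory, so treat as an assumption), 4 selects 64-bit access, while 64 is simply out of range:

    /* ACPI Generic Address Structure "Access Size" encoding (from memory): */
    enum acpi_gas_access_size {
        ACPI_GAS_ACCESS_UNDEFINED = 0,
        ACPI_GAS_ACCESS_BYTE  = 1,  /*  8-bit */
        ACPI_GAS_ACCESS_WORD  = 2,  /* 16-bit */
        ACPI_GAS_ACCESS_DWORD = 3,  /* 32-bit */
        ACPI_GAS_ACCESS_QWORD = 4,  /* 64-bit: the value the fix writes */
    };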
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index 52c7395cb8d8..0d0d4529ee36 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -122,6 +122,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
122 struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm); 122 struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
123 struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); 123 struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
124 unsigned int unit; 124 unsigned int unit;
125 u32 unit_size;
125 int ret; 126 int ret;
126 127
127 if (!ctx->u.aes.key_len) 128 if (!ctx->u.aes.key_len)
@@ -133,11 +134,17 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
133 if (!req->info) 134 if (!req->info)
134 return -EINVAL; 135 return -EINVAL;
135 136
136 for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) 137 unit_size = CCP_XTS_AES_UNIT_SIZE__LAST;
137 if (!(req->nbytes & (unit_size_map[unit].size - 1))) 138 if (req->nbytes <= unit_size_map[0].size) {
138 break; 139 for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) {
140 if (!(req->nbytes & (unit_size_map[unit].size - 1))) {
141 unit_size = unit_size_map[unit].value;
142 break;
143 }
144 }
145 }
139 146
140 if ((unit_size_map[unit].value == CCP_XTS_AES_UNIT_SIZE__LAST) || 147 if ((unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) ||
141 (ctx->u.aes.key_len != AES_KEYSIZE_128)) { 148 (ctx->u.aes.key_len != AES_KEYSIZE_128)) {
142 /* Use the fallback to process the request for any 149 /* Use the fallback to process the request for any
143 * unsupported unit sizes or key sizes 150 * unsupported unit sizes or key sizes
@@ -158,7 +165,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
158 rctx->cmd.engine = CCP_ENGINE_XTS_AES_128; 165 rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
159 rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT 166 rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
160 : CCP_AES_ACTION_DECRYPT; 167 : CCP_AES_ACTION_DECRYPT;
161 rctx->cmd.u.xts.unit_size = unit_size_map[unit].value; 168 rctx->cmd.u.xts.unit_size = unit_size;
162 rctx->cmd.u.xts.key = &ctx->u.aes.key_sg; 169 rctx->cmd.u.xts.key = &ctx->u.aes.key_sg;
163 rctx->cmd.u.xts.key_len = ctx->u.aes.key_len; 170 rctx->cmd.u.xts.key_len = ctx->u.aes.key_len;
164 rctx->cmd.u.xts.iv = &rctx->iv_sg; 171 rctx->cmd.u.xts.iv = &rctx->iv_sg;
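The scan keys off the usual power-of-two divisibility trick, and the new req->nbytes <= unit_size_map[0].size guard keeps oversized requests on the fallback path. A compilable sketch of the divisibility test:

    #include <stdio.h>

    /* n is a multiple of size (size a power of two) iff (n & (size - 1)) == 0. */
    static int is_multiple_pow2(unsigned int n, unsigned int size)
    {
        return (n & (size - 1)) == 0;
    }

    int main(void)
    {
        printf("%d %d\n", is_multiple_pow2(4096, 512), is_multiple_pow2(4100, 512));
        return 0;
    }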
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 6eefaa2fe58f..63464e86f2b1 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -1986,7 +1986,7 @@ err_algs:
1986 &dd->pdata->algs_info[i].algs_list[j]); 1986 &dd->pdata->algs_info[i].algs_list[j]);
1987err_pm: 1987err_pm:
1988 pm_runtime_disable(dev); 1988 pm_runtime_disable(dev);
1989 if (dd->polling_mode) 1989 if (!dd->polling_mode)
1990 dma_release_channel(dd->dma_lch); 1990 dma_release_channel(dd->dma_lch);
1991data_err: 1991data_err:
1992 dev_err(dev, "initialization failed.\n"); 1992 dev_err(dev, "initialization failed.\n");
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 1d6c803804d5..e92418facc92 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -268,8 +268,11 @@ int update_devfreq(struct devfreq *devfreq)
268 devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE); 268 devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);
269 269
270 err = devfreq->profile->target(devfreq->dev.parent, &freq, flags); 270 err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
271 if (err) 271 if (err) {
272 freqs.new = cur_freq;
273 devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
272 return err; 274 return err;
275 }
273 276
274 freqs.new = freq; 277 freqs.new = freq;
275 devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE); 278 devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
@@ -552,6 +555,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
552 devfreq->profile = profile; 555 devfreq->profile = profile;
553 strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN); 556 strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
554 devfreq->previous_freq = profile->initial_freq; 557 devfreq->previous_freq = profile->initial_freq;
558 devfreq->last_status.current_frequency = profile->initial_freq;
555 devfreq->data = data; 559 devfreq->data = data;
556 devfreq->nb.notifier_call = devfreq_notifier_call; 560 devfreq->nb.notifier_call = devfreq_notifier_call;
557 561
@@ -561,23 +565,22 @@ struct devfreq *devfreq_add_device(struct device *dev,
561 mutex_lock(&devfreq->lock); 565 mutex_lock(&devfreq->lock);
562 } 566 }
563 567
564 devfreq->trans_table = devm_kzalloc(dev, sizeof(unsigned int) *
565 devfreq->profile->max_state *
566 devfreq->profile->max_state,
567 GFP_KERNEL);
568 devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned long) *
569 devfreq->profile->max_state,
570 GFP_KERNEL);
571 devfreq->last_stat_updated = jiffies;
572
573 dev_set_name(&devfreq->dev, "%s", dev_name(dev)); 568 dev_set_name(&devfreq->dev, "%s", dev_name(dev));
574 err = device_register(&devfreq->dev); 569 err = device_register(&devfreq->dev);
575 if (err) { 570 if (err) {
576 put_device(&devfreq->dev);
577 mutex_unlock(&devfreq->lock); 571 mutex_unlock(&devfreq->lock);
578 goto err_out; 572 goto err_out;
579 } 573 }
580 574
575 devfreq->trans_table = devm_kzalloc(&devfreq->dev, sizeof(unsigned int) *
576 devfreq->profile->max_state *
577 devfreq->profile->max_state,
578 GFP_KERNEL);
579 devfreq->time_in_state = devm_kzalloc(&devfreq->dev, sizeof(unsigned long) *
580 devfreq->profile->max_state,
581 GFP_KERNEL);
582 devfreq->last_stat_updated = jiffies;
583
581 srcu_init_notifier_head(&devfreq->transition_notifier_list); 584 srcu_init_notifier_head(&devfreq->transition_notifier_list);
582 585
583 mutex_unlock(&devfreq->lock); 586 mutex_unlock(&devfreq->lock);
@@ -603,7 +606,6 @@ struct devfreq *devfreq_add_device(struct device *dev,
603err_init: 606err_init:
604 list_del(&devfreq->node); 607 list_del(&devfreq->node);
605 device_unregister(&devfreq->dev); 608 device_unregister(&devfreq->dev);
606 kfree(devfreq);
607err_out: 609err_out:
608 return ERR_PTR(err); 610 return ERR_PTR(err);
609} 611}
@@ -621,7 +623,6 @@ int devfreq_remove_device(struct devfreq *devfreq)
621 return -EINVAL; 623 return -EINVAL;
622 624
623 device_unregister(&devfreq->dev); 625 device_unregister(&devfreq->dev);
624 put_device(&devfreq->dev);
625 626
626 return 0; 627 return 0;
627} 628}
diff --git a/drivers/devfreq/event/exynos-nocp.c b/drivers/devfreq/event/exynos-nocp.c
index 6b6a5f310486..a5841403bde8 100644
--- a/drivers/devfreq/event/exynos-nocp.c
+++ b/drivers/devfreq/event/exynos-nocp.c
@@ -220,9 +220,6 @@ static int exynos_nocp_parse_dt(struct platform_device *pdev,
220 220
221 /* Maps the memory mapped IO to control nocp register */ 221 /* Maps the memory mapped IO to control nocp register */
222 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 222 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
223 if (IS_ERR(res))
224 return PTR_ERR(res);
225
226 base = devm_ioremap_resource(dev, res); 223 base = devm_ioremap_resource(dev, res);
227 if (IS_ERR(base)) 224 if (IS_ERR(base))
228 return PTR_ERR(base); 225 return PTR_ERR(base);
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 4a424eca75ed..f353db213a81 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,2 +1,2 @@
1obj-y := dma-buf.o fence.o reservation.o seqno-fence.o 1obj-y := dma-buf.o fence.o reservation.o seqno-fence.o fence-array.o
2obj-$(CONFIG_SYNC_FILE) += sync_file.o 2obj-$(CONFIG_SYNC_FILE) += sync_file.o
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 4a2c07ee6677..20ce0687b111 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -33,6 +33,7 @@
33#include <linux/seq_file.h> 33#include <linux/seq_file.h>
34#include <linux/poll.h> 34#include <linux/poll.h>
35#include <linux/reservation.h> 35#include <linux/reservation.h>
36#include <linux/mm.h>
36 37
37#include <uapi/linux/dma-buf.h> 38#include <uapi/linux/dma-buf.h>
38 39
@@ -90,7 +91,7 @@ static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
90 dmabuf = file->private_data; 91 dmabuf = file->private_data;
91 92
92 /* check for overflowing the buffer's size */ 93 /* check for overflowing the buffer's size */
93 if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) > 94 if (vma->vm_pgoff + vma_pages(vma) >
94 dmabuf->size >> PAGE_SHIFT) 95 dmabuf->size >> PAGE_SHIFT)
95 return -EINVAL; 96 return -EINVAL;
96 97
@@ -723,11 +724,11 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
723 return -EINVAL; 724 return -EINVAL;
724 725
725 /* check for offset overflow */ 726 /* check for offset overflow */
726 if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < pgoff) 727 if (pgoff + vma_pages(vma) < pgoff)
727 return -EOVERFLOW; 728 return -EOVERFLOW;
728 729
729 /* check for overflowing the buffer's size */ 730 /* check for overflowing the buffer's size */
730 if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) > 731 if (pgoff + vma_pages(vma) >
731 dmabuf->size >> PAGE_SHIFT) 732 dmabuf->size >> PAGE_SHIFT)
732 return -EINVAL; 733 return -EINVAL;
733 734
@@ -823,7 +824,7 @@ void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
823EXPORT_SYMBOL_GPL(dma_buf_vunmap); 824EXPORT_SYMBOL_GPL(dma_buf_vunmap);
824 825
825#ifdef CONFIG_DEBUG_FS 826#ifdef CONFIG_DEBUG_FS
826static int dma_buf_describe(struct seq_file *s) 827static int dma_buf_debug_show(struct seq_file *s, void *unused)
827{ 828{
828 int ret; 829 int ret;
829 struct dma_buf *buf_obj; 830 struct dma_buf *buf_obj;
@@ -878,17 +879,9 @@ static int dma_buf_describe(struct seq_file *s)
878 return 0; 879 return 0;
879} 880}
880 881
881static int dma_buf_show(struct seq_file *s, void *unused)
882{
883 void (*func)(struct seq_file *) = s->private;
884
885 func(s);
886 return 0;
887}
888
889static int dma_buf_debug_open(struct inode *inode, struct file *file) 882static int dma_buf_debug_open(struct inode *inode, struct file *file)
890{ 883{
891 return single_open(file, dma_buf_show, inode->i_private); 884 return single_open(file, dma_buf_debug_show, NULL);
892} 885}
893 886
894static const struct file_operations dma_buf_debug_fops = { 887static const struct file_operations dma_buf_debug_fops = {
@@ -902,20 +895,23 @@ static struct dentry *dma_buf_debugfs_dir;
902 895
903static int dma_buf_init_debugfs(void) 896static int dma_buf_init_debugfs(void)
904{ 897{
898 struct dentry *d;
905 int err = 0; 899 int err = 0;
906 900
907 dma_buf_debugfs_dir = debugfs_create_dir("dma_buf", NULL); 901 d = debugfs_create_dir("dma_buf", NULL);
908 902 if (IS_ERR(d))
909 if (IS_ERR(dma_buf_debugfs_dir)) { 903 return PTR_ERR(d);
910 err = PTR_ERR(dma_buf_debugfs_dir);
911 dma_buf_debugfs_dir = NULL;
912 return err;
913 }
914 904
915 err = dma_buf_debugfs_create_file("bufinfo", dma_buf_describe); 905 dma_buf_debugfs_dir = d;
916 906
917 if (err) 907 d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
908 NULL, &dma_buf_debug_fops);
909 if (IS_ERR(d)) {
918 pr_debug("dma_buf: debugfs: failed to create node bufinfo\n"); 910 pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
911 debugfs_remove_recursive(dma_buf_debugfs_dir);
912 dma_buf_debugfs_dir = NULL;
913 err = PTR_ERR(d);
914 }
919 915
920 return err; 916 return err;
921} 917}
@@ -925,17 +921,6 @@ static void dma_buf_uninit_debugfs(void)
925 if (dma_buf_debugfs_dir) 921 if (dma_buf_debugfs_dir)
926 debugfs_remove_recursive(dma_buf_debugfs_dir); 922 debugfs_remove_recursive(dma_buf_debugfs_dir);
927} 923}
928
929int dma_buf_debugfs_create_file(const char *name,
930 int (*write)(struct seq_file *))
931{
932 struct dentry *d;
933
934 d = debugfs_create_file(name, S_IRUGO, dma_buf_debugfs_dir,
935 write, &dma_buf_debug_fops);
936
937 return PTR_ERR_OR_ZERO(d);
938}
939#else 924#else
940static inline int dma_buf_init_debugfs(void) 925static inline int dma_buf_init_debugfs(void)
941{ 926{
diff --git a/drivers/dma-buf/fence-array.c b/drivers/dma-buf/fence-array.c
new file mode 100644
index 000000000000..a8731c853da6
--- /dev/null
+++ b/drivers/dma-buf/fence-array.c
@@ -0,0 +1,144 @@
1/*
2 * fence-array: aggregate fences to be waited together
3 *
4 * Copyright (C) 2016 Collabora Ltd
5 * Copyright (C) 2016 Advanced Micro Devices, Inc.
6 * Authors:
7 * Gustavo Padovan <gustavo@padovan.org>
8 * Christian König <christian.koenig@amd.com>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License version 2 as published by
12 * the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 */
19
20#include <linux/export.h>
21#include <linux/slab.h>
22#include <linux/fence-array.h>
23
24static void fence_array_cb_func(struct fence *f, struct fence_cb *cb);
25
26static const char *fence_array_get_driver_name(struct fence *fence)
27{
28 return "fence_array";
29}
30
31static const char *fence_array_get_timeline_name(struct fence *fence)
32{
33 return "unbound";
34}
35
36static void fence_array_cb_func(struct fence *f, struct fence_cb *cb)
37{
38 struct fence_array_cb *array_cb =
39 container_of(cb, struct fence_array_cb, cb);
40 struct fence_array *array = array_cb->array;
41
42 if (atomic_dec_and_test(&array->num_pending))
43 fence_signal(&array->base);
44 fence_put(&array->base);
45}
46
47static bool fence_array_enable_signaling(struct fence *fence)
48{
49 struct fence_array *array = to_fence_array(fence);
50 struct fence_array_cb *cb = (void *)(&array[1]);
51 unsigned i;
52
53 for (i = 0; i < array->num_fences; ++i) {
54 cb[i].array = array;
55 /*
56 * As we may report that the fence is signaled before all
57 * callbacks are complete, we need to take an additional
58 * reference count on the array so that we do not free it too
59 * early. The core fence handling will only hold the reference
60 * until we signal the array as complete (but that is now
61 * insufficient).
62 */
63 fence_get(&array->base);
64 if (fence_add_callback(array->fences[i], &cb[i].cb,
65 fence_array_cb_func)) {
66 fence_put(&array->base);
67 if (atomic_dec_and_test(&array->num_pending))
68 return false;
69 }
70 }
71
72 return true;
73}
74
75static bool fence_array_signaled(struct fence *fence)
76{
77 struct fence_array *array = to_fence_array(fence);
78
79 return atomic_read(&array->num_pending) <= 0;
80}
81
82static void fence_array_release(struct fence *fence)
83{
84 struct fence_array *array = to_fence_array(fence);
85 unsigned i;
86
87 for (i = 0; i < array->num_fences; ++i)
88 fence_put(array->fences[i]);
89
90 kfree(array->fences);
91 fence_free(fence);
92}
93
94const struct fence_ops fence_array_ops = {
95 .get_driver_name = fence_array_get_driver_name,
96 .get_timeline_name = fence_array_get_timeline_name,
97 .enable_signaling = fence_array_enable_signaling,
98 .signaled = fence_array_signaled,
99 .wait = fence_default_wait,
100 .release = fence_array_release,
101};
102
103/**
104 * fence_array_create - Create a custom fence array
105 * @num_fences: [in] number of fences to add in the array
106 * @fences: [in] array containing the fences
107 * @context: [in] fence context to use
108 * @seqno: [in] sequence number to use
109 * @signal_on_any: [in] signal on any fence in the array
110 *
111 * Allocate a fence_array object and initialize the base fence with fence_init().
112 * In case of error it returns NULL.
113 *
114 * The caller should allocate the fences array with num_fences entries
115 * and fill it with the fences it wants to add to the object. Ownership of this
116 * array is taken and fence_put() is used on each fence on release.
117 *
118 * If @signal_on_any is true the fence array signals if any fence in the array
119 * signals, otherwise it signals when all fences in the array signal.
120 */
121struct fence_array *fence_array_create(int num_fences, struct fence **fences,
122 u64 context, unsigned seqno,
123 bool signal_on_any)
124{
125 struct fence_array *array;
126 size_t size = sizeof(*array);
127
128 /* Allocate the callback structures behind the array. */
129 size += num_fences * sizeof(struct fence_array_cb);
130 array = kzalloc(size, GFP_KERNEL);
131 if (!array)
132 return NULL;
133
134 spin_lock_init(&array->lock);
135 fence_init(&array->base, &fence_array_ops, &array->lock,
136 context, seqno);
137
138 array->num_fences = num_fences;
139 atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
140 array->fences = fences;
141
142 return array;
143}
144EXPORT_SYMBOL(fence_array_create);
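A hedged sketch of how a caller might use the new API, assuming it already holds a reference on each fence it hands over (the array takes ownership of the fences and the array memory on success); the caller-side names here are hypothetical and error handling is trimmed:

    #include <linux/fence-array.h>

    static void all_done(struct fence *f, struct fence_cb *cb)
    {
        pr_info("every fence in the array has signaled\n");
    }

    static struct fence_cb done_cb;

    static int wait_on_all(struct fence **fences, int n)
    {
        struct fence_array *array;

        array = fence_array_create(n, fences, fence_context_alloc(1),
                                   1 /* seqno */,
                                   false /* signal when all have signaled */);
        if (!array)
            return -ENOMEM;             /* fences[] is still the caller's */

        if (fence_add_callback(&array->base, &done_cb, all_done))
            fence_put(&array->base);    /* already signaled */
        /* a final fence_put(&array->base) when the caller is done is omitted */
        return 0;
    }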
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c
index 7b05dbe9b296..4d51f9e83fa8 100644
--- a/drivers/dma-buf/fence.c
+++ b/drivers/dma-buf/fence.c
@@ -35,7 +35,7 @@ EXPORT_TRACEPOINT_SYMBOL(fence_emit);
35 * context or not. One device can have multiple separate contexts, 35 * context or not. One device can have multiple separate contexts,
36 * and they're used if some engine can run independently of another. 36 * and they're used if some engine can run independently of another.
37 */ 37 */
38static atomic_t fence_context_counter = ATOMIC_INIT(0); 38static atomic64_t fence_context_counter = ATOMIC64_INIT(0);
39 39
40/** 40/**
41 * fence_context_alloc - allocate an array of fence contexts 41 * fence_context_alloc - allocate an array of fence contexts
@@ -44,10 +44,10 @@ static atomic_t fence_context_counter = ATOMIC_INIT(0);
44 * This function will return the first index of the number of fences allocated. 44 * This function will return the first index of the number of fences allocated.
45 * The fence context is used for setting fence->context to a unique number. 45 * The fence context is used for setting fence->context to a unique number.
46 */ 46 */
47unsigned fence_context_alloc(unsigned num) 47u64 fence_context_alloc(unsigned num)
48{ 48{
49 BUG_ON(!num); 49 BUG_ON(!num);
50 return atomic_add_return(num, &fence_context_counter) - num; 50 return atomic64_add_return(num, &fence_context_counter) - num;
51} 51}
52EXPORT_SYMBOL(fence_context_alloc); 52EXPORT_SYMBOL(fence_context_alloc);
53 53
@@ -513,7 +513,7 @@ EXPORT_SYMBOL(fence_wait_any_timeout);
513 */ 513 */
514void 514void
515fence_init(struct fence *fence, const struct fence_ops *ops, 515fence_init(struct fence *fence, const struct fence_ops *ops,
516 spinlock_t *lock, unsigned context, unsigned seqno) 516 spinlock_t *lock, u64 context, unsigned seqno)
517{ 517{
518 BUG_ON(!lock); 518 BUG_ON(!lock);
519 BUG_ON(!ops || !ops->wait || !ops->enable_signaling || 519 BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
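The counter widening matters because fence contexts are allocated for the lifetime of the system and never freed; a 32-bit counter wraps after about 4.3 billion allocations, at which point two unrelated timelines could compare as equal. A compilable demonstration of the wrap:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t c32 = UINT32_MAX;
        uint64_t c64 = UINT32_MAX;

        printf("32-bit: %u -> %u (wrapped)\n", c32, (uint32_t)(c32 + 1));
        printf("64-bit: %llu -> %llu\n",
               (unsigned long long)c64, (unsigned long long)(c64 + 1));
        return 0;
    }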
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index c0bd5722c997..9566a62ad8e3 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -35,6 +35,17 @@
35#include <linux/reservation.h> 35#include <linux/reservation.h>
36#include <linux/export.h> 36#include <linux/export.h>
37 37
38/**
39 * DOC: Reservation Object Overview
40 *
41 * The reservation object provides a mechanism to manage shared and
42 * exclusive fences associated with a buffer. A reservation object
43 * can have attached one exclusive fence (normally associated with
44 * write operations) or N shared fences (read operations). The RCU
45 * mechanism is used to protect read access to fences from locked
46 * write-side updates.
47 */
48
38DEFINE_WW_CLASS(reservation_ww_class); 49DEFINE_WW_CLASS(reservation_ww_class);
39EXPORT_SYMBOL(reservation_ww_class); 50EXPORT_SYMBOL(reservation_ww_class);
40 51
@@ -43,9 +54,17 @@ EXPORT_SYMBOL(reservation_seqcount_class);
43 54
44const char reservation_seqcount_string[] = "reservation_seqcount"; 55const char reservation_seqcount_string[] = "reservation_seqcount";
45EXPORT_SYMBOL(reservation_seqcount_string); 56EXPORT_SYMBOL(reservation_seqcount_string);
46/* 57
47 * Reserve space to add a shared fence to a reservation_object, 58/**
48 * must be called with obj->lock held. 59 * reservation_object_reserve_shared - Reserve space to add a shared
60 * fence to a reservation_object.
61 * @obj: reservation object
62 *
63 * Should be called before reservation_object_add_shared_fence(). Must
64 * be called with obj->lock held.
65 *
66 * RETURNS
67 * Zero for success, or -errno
49 */ 68 */
50int reservation_object_reserve_shared(struct reservation_object *obj) 69int reservation_object_reserve_shared(struct reservation_object *obj)
51{ 70{
@@ -180,7 +199,11 @@ done:
180 fence_put(old_fence); 199 fence_put(old_fence);
181} 200}
182 201
183/* 202/**
203 * reservation_object_add_shared_fence - Add a fence to a shared slot
204 * @obj: the reservation object
205 * @fence: the shared fence to add
206 *
184 * Add a fence to a shared slot; obj->lock must be held, and 207 * Add a fence to a shared slot; obj->lock must be held, and
185 * reservation_object_reserve_shared() has been called. 208 * reservation_object_reserve_shared() has been called.
186 */ 209 */
@@ -200,6 +223,13 @@ void reservation_object_add_shared_fence(struct reservation_object *obj,
200} 223}
201EXPORT_SYMBOL(reservation_object_add_shared_fence); 224EXPORT_SYMBOL(reservation_object_add_shared_fence);
202 225
226/**
227 * reservation_object_add_excl_fence - Add an exclusive fence.
228 * @obj: the reservation object
229 * @fence: the exclusive fence to add
230 *
231 * Add a fence to the exclusive slot. The obj->lock must be held.
232 */
203void reservation_object_add_excl_fence(struct reservation_object *obj, 233void reservation_object_add_excl_fence(struct reservation_object *obj,
204 struct fence *fence) 234 struct fence *fence)
205{ 235{
@@ -233,6 +263,18 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
233} 263}
234EXPORT_SYMBOL(reservation_object_add_excl_fence); 264EXPORT_SYMBOL(reservation_object_add_excl_fence);
235 265
266/**
267 * reservation_object_get_fences_rcu - Get an object's shared and exclusive
268 * fences without update side lock held
269 * @obj: the reservation object
270 * @pfence_excl: the returned exclusive fence (or NULL)
271 * @pshared_count: the number of shared fences returned
272 * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
273 * the required size, and must be freed by caller)
274 *
275 * RETURNS
276 * Zero or -errno
277 */
236int reservation_object_get_fences_rcu(struct reservation_object *obj, 278int reservation_object_get_fences_rcu(struct reservation_object *obj,
237 struct fence **pfence_excl, 279 struct fence **pfence_excl,
238 unsigned *pshared_count, 280 unsigned *pshared_count,
@@ -319,6 +361,18 @@ unlock:
319} 361}
320EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu); 362EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
321 363
364/**
365 * reservation_object_wait_timeout_rcu - Wait on a reservation object's
366 * shared and/or exclusive fences.
367 * @obj: the reservation object
368 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
369 * @intr: if true, do interruptible wait
370 * @timeout: timeout value in jiffies or zero to return immediately
371 *
372 * RETURNS
373 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
374 * greater than zero on success.
375 */
322long reservation_object_wait_timeout_rcu(struct reservation_object *obj, 376long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
323 bool wait_all, bool intr, 377 bool wait_all, bool intr,
324 unsigned long timeout) 378 unsigned long timeout)
@@ -416,6 +470,16 @@ reservation_object_test_signaled_single(struct fence *passed_fence)
416 return ret; 470 return ret;
417} 471}
418 472
473/**
474 * reservation_object_test_signaled_rcu - Test if a reservation object's
475 * fences have been signaled.
476 * @obj: the reservation object
477 * @test_all: if true, test all fences, otherwise only test the exclusive
478 * fence
479 *
480 * RETURNS
481 * true if all fences signaled, else false
482 */
419bool reservation_object_test_signaled_rcu(struct reservation_object *obj, 483bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
420 bool test_all) 484 bool test_all)
421{ 485{
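Taken together, the new kerneldoc describes a calling convention along these lines; a hedged kernel-style sketch (not from the patch), assuming the caller already participates in the ww_mutex acquire protocol:

    static int publish_read_fence(struct reservation_object *obj,
                                  struct fence *fence)
    {
        int ret;

        ww_mutex_lock(&obj->lock, NULL);        /* obj->lock held, as the docs require */
        ret = reservation_object_reserve_shared(obj);
        if (!ret)
            reservation_object_add_shared_fence(obj, fence);
        ww_mutex_unlock(&obj->lock);
        return ret;
    }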
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index f08cf2d8309e..9aaa608dfe01 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -82,7 +82,7 @@ struct sync_file *sync_file_create(struct fence *fence)
82 82
83 sync_file->num_fences = 1; 83 sync_file->num_fences = 1;
84 atomic_set(&sync_file->status, 1); 84 atomic_set(&sync_file->status, 1);
85 snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%d-%d", 85 snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d",
86 fence->ops->get_driver_name(fence), 86 fence->ops->get_driver_name(fence),
87 fence->ops->get_timeline_name(fence), fence->context, 87 fence->ops->get_timeline_name(fence), fence->context,
88 fence->seqno); 88 fence->seqno);
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 8e304b1befc5..75bd6621dc5d 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -242,7 +242,7 @@ struct at_xdmac_lld {
242 u32 mbr_dus; /* Destination Microblock Stride Register */ 242 u32 mbr_dus; /* Destination Microblock Stride Register */
243}; 243};
244 244
245 245/* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */
246struct at_xdmac_desc { 246struct at_xdmac_desc {
247 struct at_xdmac_lld lld; 247 struct at_xdmac_lld lld;
248 enum dma_transfer_direction direction; 248 enum dma_transfer_direction direction;
@@ -253,7 +253,7 @@ struct at_xdmac_desc {
253 unsigned int xfer_size; 253 unsigned int xfer_size;
254 struct list_head descs_list; 254 struct list_head descs_list;
255 struct list_head xfer_node; 255 struct list_head xfer_node;
256}; 256} __aligned(sizeof(u64));
257 257
258static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb) 258static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
259{ 259{
@@ -1400,6 +1400,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
1400 u32 cur_nda, check_nda, cur_ubc, mask, value; 1400 u32 cur_nda, check_nda, cur_ubc, mask, value;
1401 u8 dwidth = 0; 1401 u8 dwidth = 0;
1402 unsigned long flags; 1402 unsigned long flags;
1403 bool initd;
1403 1404
1404 ret = dma_cookie_status(chan, cookie, txstate); 1405 ret = dma_cookie_status(chan, cookie, txstate);
1405 if (ret == DMA_COMPLETE) 1406 if (ret == DMA_COMPLETE)
@@ -1424,7 +1425,16 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
1424 residue = desc->xfer_size; 1425 residue = desc->xfer_size;
1425 /* 1426 /*
1426 * Flush FIFO: only relevant when the transfer is source peripheral 1427 * Flush FIFO: only relevant when the transfer is source peripheral
1427 * synchronized. 1428 * synchronized. Flush is needed before reading CUBC because data in
1429 * the FIFO are not reported by CUBC. Reporting a residue of the
1430 * transfer length while there is data in the FIFO can cause issues.
1431 * Use case: the Atmel USART has a timeout, meaning characters have
1432 * been received but no further character arrives for a while. On
1433 * timeout, it requests the residue. If the data are in the DMA FIFO,
1434 * we will return a residue equal to the transfer length, i.e. no data
1435 * received. If an application is waiting for this data, it will hang
1436 * since we won't have another USART timeout without receiving new
1437 * data.
1428 */ 1438 */
1429 mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC; 1439 mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
1430 value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM; 1440 value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
@@ -1435,34 +1445,43 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
1435 } 1445 }
1436 1446
1437 /* 1447 /*
1438 * When processing the residue, we need to read two registers but we 1448 * The easiest way to compute the residue should be to pause the DMA
1439 * can't do it in an atomic way. AT_XDMAC_CNDA is used to find where 1449 * but doing this can lead to missing data, as some devices don't
1440 * we stand in the descriptor list and AT_XDMAC_CUBC is used 1450 * have a FIFO.
1441 * to know how many data are remaining for the current descriptor. 1451 * We need to read several registers because:
1442 * Since the dma channel is not paused to not loose data, between the 1452 * - DMA is running therefore a descriptor change is possible while
1443 * AT_XDMAC_CNDA and AT_XDMAC_CUBC read, we may have change of 1453 * reading these registers
1444 * descriptor. 1454 * - When the block transfer is done, the value of the CUBC register
1445 * For that reason, after reading AT_XDMAC_CUBC, we check if we are 1455 * is set to its initial value until the fetch of the next descriptor.
1446 * still using the same descriptor by reading a second time 1456 * This value will corrupt the residue calculation so we have to skip
1447 * AT_XDMAC_CNDA. If AT_XDMAC_CNDA has changed, it means we have to 1457 * it.
1448 * read again AT_XDMAC_CUBC. 1458 *
1459 * INITD -------- ------------
1460 * |____________________|
1461 * _______________________ _______________
1462 * NDA @desc2 \/ @desc3
1463 * _______________________/\_______________
1464 * __________ ___________ _______________
1465 * CUBC 0 \/ MAX desc1 \/ MAX desc2
1466 * __________/\___________/\_______________
1467 *
1468 * Since descriptors are aligned on 64 bits, we can assume that
1469 * the update of NDA and CUBC is atomic.
1449 * Memory barriers are used to ensure the read order of the registers. 1470 * Memory barriers are used to ensure the read order of the registers.
1450 * A max number of retries is set because unlikely it can never ends if 1471 * A max number of retries is set because, although unlikely, the loop could otherwise never end.
1451 * we are transferring a lot of data with small buffers.
1452 */ 1472 */
1453 cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
1454 rmb();
1455 cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
1456 for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) { 1473 for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
1457 rmb();
1458 check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc; 1474 check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
1459 1475 rmb();
1460 if (likely(cur_nda == check_nda)) 1476 initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
1461 break;
1462
1463 cur_nda = check_nda;
1464 rmb(); 1477 rmb();
1465 cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC); 1478 cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
1479 rmb();
1480 cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
1481 rmb();
1482
1483 if ((check_nda == cur_nda) && initd)
1484 break;
1466 } 1485 }
1467 1486
1468 if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) { 1487 if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
@@ -1471,6 +1490,19 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
1471 } 1490 }
1472 1491
1473 /* 1492 /*
1493 * Flush FIFO: only relevant when the transfer is source peripheral
1494 * synchronized. Another flush is needed here because CUBC is updated
1495 * when the controller sends the data write command. It can lead to
1496 * report data that are not written in the memory or the device. The
1497 * FIFO flush ensures that data are really written.
1498 */
1499 if ((desc->lld.mbr_cfg & mask) == value) {
1500 at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
1501 while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
1502 cpu_relax();
1503 }
1504
1505 /*
1474 * Remove size of all microblocks already transferred and the current 1506 * Remove size of all microblocks already transferred and the current
1475 * one. Then add the remaining size to transfer of the current 1507 * one. Then add the remaining size to transfer of the current
1476 * microblock. 1508 * microblock.
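The new __aligned(sizeof(u64)) on the descriptor is what licenses the "update of NDA and CUBC is atomic" assumption in the comment above: per the comment the driver adds, the relevant 32-bit fields end up inside one naturally aligned 64-bit unit. A host-side illustration of the attribute (hypothetical struct):

    #include <stdio.h>

    struct desc {
        unsigned int nda;
        unsigned int ubc;
    } __attribute__((aligned(sizeof(unsigned long long))));

    int main(void)
    {
        printf("alignof(struct desc) = %zu\n", _Alignof(struct desc));
        return 0;
    }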
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 25d1dadcddd1..d0446a75990a 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -703,8 +703,9 @@ static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
703 goto free_resources; 703 goto free_resources;
704 } 704 }
705 705
706 src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0, 706 src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src),
707 PAGE_SIZE, DMA_TO_DEVICE); 707 (size_t)src & ~PAGE_MASK, PAGE_SIZE,
708 DMA_TO_DEVICE);
708 unmap->addr[0] = src_dma; 709 unmap->addr[0] = src_dma;
709 710
710 ret = dma_mapping_error(dma_chan->device->dev, src_dma); 711 ret = dma_mapping_error(dma_chan->device->dev, src_dma);
@@ -714,8 +715,9 @@ static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
714 } 715 }
715 unmap->to_cnt = 1; 716 unmap->to_cnt = 1;
716 717
717 dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0, 718 dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest),
718 PAGE_SIZE, DMA_FROM_DEVICE); 719 (size_t)dest & ~PAGE_MASK, PAGE_SIZE,
720 DMA_FROM_DEVICE);
719 unmap->addr[1] = dest_dma; 721 unmap->addr[1] = dest_dma;
720 722
721 ret = dma_mapping_error(dma_chan->device->dev, dest_dma); 723 ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
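dma_map_page() takes an offset within the page; hard-coding 0 is only correct when the buffer happens to start on a page boundary, so the fix derives the real in-page offset from the pointer. A compilable sketch of that computation:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
        char buf[1];                    /* any address will do for the demo */
        unsigned long p = (unsigned long)buf;

        /* the in-page offset now passed to dma_map_page() */
        printf("page offset of %#lx is %#lx\n", p, p & ~PAGE_MASK);
        return 0;
    }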
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 6aa256b0a1ed..c3ee3ad98a63 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -565,7 +565,8 @@ void edac_mc_reset_delay_period(unsigned long value)
565 list_for_each(item, &mc_devices) { 565 list_for_each(item, &mc_devices) {
566 mci = list_entry(item, struct mem_ctl_info, link); 566 mci = list_entry(item, struct mem_ctl_info, link);
567 567
568 edac_mod_work(&mci->work, value); 568 if (mci->op_state == OP_RUNNING_POLL)
569 edac_mod_work(&mci->work, value);
569 } 570 }
570 mutex_unlock(&mem_ctls_mutex); 571 mutex_unlock(&mem_ctls_mutex);
571} 572}
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index b4d0bf6534cf..6744d88bdea8 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -239,8 +239,11 @@ static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
239 { 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc }, 239 { 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc },
240}; 240};
241 241
242#define RIR_RNK_TGT(reg) GET_BITFIELD(reg, 16, 19) 242#define RIR_RNK_TGT(type, reg) (((type) == BROADWELL) ? \
243#define RIR_OFFSET(reg) GET_BITFIELD(reg, 2, 14) 243 GET_BITFIELD(reg, 20, 23) : GET_BITFIELD(reg, 16, 19))
244
245#define RIR_OFFSET(type, reg) (((type) == HASWELL || (type) == BROADWELL) ? \
246 GET_BITFIELD(reg, 2, 15) : GET_BITFIELD(reg, 2, 14))
244 247
245/* Device 16, functions 2-7 */ 248/* Device 16, functions 2-7 */
246 249
@@ -326,6 +329,7 @@ struct pci_id_descr {
326struct pci_id_table { 329struct pci_id_table {
327 const struct pci_id_descr *descr; 330 const struct pci_id_descr *descr;
328 int n_devs; 331 int n_devs;
332 enum type type;
329}; 333};
330 334
331struct sbridge_dev { 335struct sbridge_dev {
@@ -394,9 +398,14 @@ static const struct pci_id_descr pci_dev_descr_sbridge[] = {
394 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0) }, 398 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0) },
395}; 399};
396 400
397#define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) } 401#define PCI_ID_TABLE_ENTRY(A, T) { \
402 .descr = A, \
403 .n_devs = ARRAY_SIZE(A), \
404 .type = T \
405}
406
398static const struct pci_id_table pci_dev_descr_sbridge_table[] = { 407static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
399 PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge), 408 PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, SANDY_BRIDGE),
400 {0,} /* 0 terminated list. */ 409 {0,} /* 0 terminated list. */
401}; 410};
402 411
@@ -463,7 +472,7 @@ static const struct pci_id_descr pci_dev_descr_ibridge[] = {
463}; 472};
464 473
465static const struct pci_id_table pci_dev_descr_ibridge_table[] = { 474static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
466 PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge), 475 PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, IVY_BRIDGE),
467 {0,} /* 0 terminated list. */ 476 {0,} /* 0 terminated list. */
468}; 477};
469 478
@@ -536,7 +545,7 @@ static const struct pci_id_descr pci_dev_descr_haswell[] = {
536}; 545};
537 546
538static const struct pci_id_table pci_dev_descr_haswell_table[] = { 547static const struct pci_id_table pci_dev_descr_haswell_table[] = {
539 PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell), 548 PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, HASWELL),
540 {0,} /* 0 terminated list. */ 549 {0,} /* 0 terminated list. */
541}; 550};
542 551
@@ -580,7 +589,7 @@ static const struct pci_id_descr pci_dev_descr_knl[] = {
580}; 589};
581 590
582static const struct pci_id_table pci_dev_descr_knl_table[] = { 591static const struct pci_id_table pci_dev_descr_knl_table[] = {
583 PCI_ID_TABLE_ENTRY(pci_dev_descr_knl), 592 PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, KNIGHTS_LANDING),
584 {0,} 593 {0,}
585}; 594};
586 595
@@ -648,7 +657,7 @@ static const struct pci_id_descr pci_dev_descr_broadwell[] = {
648}; 657};
649 658
650static const struct pci_id_table pci_dev_descr_broadwell_table[] = { 659static const struct pci_id_table pci_dev_descr_broadwell_table[] = {
651 PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell), 660 PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, BROADWELL),
652 {0,} /* 0 terminated list. */ 661 {0,} /* 0 terminated list. */
653}; 662};
654 663
@@ -1894,14 +1903,14 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
1894 pci_read_config_dword(pvt->pci_tad[i], 1903 pci_read_config_dword(pvt->pci_tad[i],
1895 rir_offset[j][k], 1904 rir_offset[j][k],
1896 &reg); 1905 &reg);
1897 tmp_mb = RIR_OFFSET(reg) << 6; 1906 tmp_mb = RIR_OFFSET(pvt->info.type, reg) << 6;
1898 1907
1899 gb = div_u64_rem(tmp_mb, 1024, &mb); 1908 gb = div_u64_rem(tmp_mb, 1024, &mb);
1900 edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n", 1909 edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
1901 i, j, k, 1910 i, j, k,
1902 gb, (mb*1000)/1024, 1911 gb, (mb*1000)/1024,
1903 ((u64)tmp_mb) << 20L, 1912 ((u64)tmp_mb) << 20L,
1904 (u32)RIR_RNK_TGT(reg), 1913 (u32)RIR_RNK_TGT(pvt->info.type, reg),
1905 reg); 1914 reg);
1906 } 1915 }
1907 } 1916 }
@@ -2234,7 +2243,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
2234 pci_read_config_dword(pvt->pci_tad[ch_add + base_ch], 2243 pci_read_config_dword(pvt->pci_tad[ch_add + base_ch],
2235 rir_offset[n_rir][idx], 2244 rir_offset[n_rir][idx],
2236 &reg); 2245 &reg);
2237 *rank = RIR_RNK_TGT(reg); 2246 *rank = RIR_RNK_TGT(pvt->info.type, reg);
2238 2247
2239 edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n", 2248 edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
2240 n_rir, 2249 n_rir,
@@ -3357,12 +3366,12 @@ fail0:
3357#define ICPU(model, table) \ 3366#define ICPU(model, table) \
3358 { X86_VENDOR_INTEL, 6, model, 0, (unsigned long)&table } 3367 { X86_VENDOR_INTEL, 6, model, 0, (unsigned long)&table }
3359 3368
3360/* Order here must match "enum type" */
3361static const struct x86_cpu_id sbridge_cpuids[] = { 3369static const struct x86_cpu_id sbridge_cpuids[] = {
3362 ICPU(0x2d, pci_dev_descr_sbridge_table), /* SANDY_BRIDGE */ 3370 ICPU(0x2d, pci_dev_descr_sbridge_table), /* SANDY_BRIDGE */
3363 ICPU(0x3e, pci_dev_descr_ibridge_table), /* IVY_BRIDGE */ 3371 ICPU(0x3e, pci_dev_descr_ibridge_table), /* IVY_BRIDGE */
3364 ICPU(0x3f, pci_dev_descr_haswell_table), /* HASWELL */ 3372 ICPU(0x3f, pci_dev_descr_haswell_table), /* HASWELL */
3365 ICPU(0x4f, pci_dev_descr_broadwell_table), /* BROADWELL */ 3373 ICPU(0x4f, pci_dev_descr_broadwell_table), /* BROADWELL */
3374 ICPU(0x56, pci_dev_descr_broadwell_table), /* BROADWELL-DE */
3366 ICPU(0x57, pci_dev_descr_knl_table), /* KNIGHTS_LANDING */ 3375 ICPU(0x57, pci_dev_descr_knl_table), /* KNIGHTS_LANDING */
3367 { } 3376 { }
3368}; 3377};
@@ -3398,7 +3407,7 @@ static int sbridge_probe(const struct x86_cpu_id *id)
3398 mc, mc + 1, num_mc); 3407 mc, mc + 1, num_mc);
3399 3408
3400 sbridge_dev->mc = mc++; 3409 sbridge_dev->mc = mc++;
3401 rc = sbridge_register_mci(sbridge_dev, id - sbridge_cpuids); 3410 rc = sbridge_register_mci(sbridge_dev, ptable->type);
3402 if (unlikely(rc < 0)) 3411 if (unlikely(rc < 0))
3403 goto fail1; 3412 goto fail1;
3404 } 3413 }
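
The sb_edac change above moves the CPU generation into the PCI ID table itself, so sbridge_probe() reads ptable->type instead of inferring the type from the table's position in sbridge_cpuids -- an inference the new BROADWELL-DE entry (0x56), which reuses the Broadwell table, would have broken. A minimal standalone sketch of that pattern; the names (id_table, ID_TABLE_ENTRY, the device IDs) are illustrative, not the driver's:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum cpu_type { SANDY_BRIDGE, IVY_BRIDGE, HASWELL, BROADWELL, KNIGHTS_LANDING };

struct id_descr { unsigned dev_id; };

struct id_table {
	const struct id_descr *descr;
	unsigned n_devs;
	enum cpu_type type;              /* the table now names its own CPU type */
};

#define ID_TABLE_ENTRY(a, t) { .descr = a, .n_devs = ARRAY_SIZE(a), .type = t }

static const struct id_descr sbridge_ids[] = { { 0x3ca0 }, { 0x3ca8 } };

static const struct id_table tables[] = {
	ID_TABLE_ENTRY(sbridge_ids, SANDY_BRIDGE),
	{ 0 },                           /* zero-terminated list */
};

int main(void)
{
	/* probe code reads tables[i].type instead of deriving it from i */
	printf("n_devs=%u type=%d\n", tables[0].n_devs, tables[0].type);
	return 0;
}
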
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index 8b3226dca1d9..caff46c0e214 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -360,6 +360,8 @@ static int palmas_usb_probe(struct platform_device *pdev)
360 360
361 palmas_enable_irq(palmas_usb); 361 palmas_enable_irq(palmas_usb);
362 /* perform initial detection */ 362 /* perform initial detection */
363 if (palmas_usb->enable_gpio_vbus_detection)
364 palmas_vbus_irq_handler(palmas_usb->gpio_vbus_irq, palmas_usb);
363 palmas_gpio_id_detect(&palmas_usb->wq_detectid.work); 365 palmas_gpio_id_detect(&palmas_usb->wq_detectid.work);
364 device_set_wakeup_capable(&pdev->dev, true); 366 device_set_wakeup_capable(&pdev->dev, true);
365 return 0; 367 return 0;
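
The palmas hunk replays the VBUS interrupt handler once during probe so a cable already plugged in at boot is reported even though no edge will fire. A toy model of that replay-the-handler idiom; vbus_irq_handler and the irq number are stand-ins:

#include <stdio.h>

typedef int irqreturn_t;

/* stand-in for the driver's VBUS interrupt handler */
static irqreturn_t vbus_irq_handler(int irq, void *data)
{
	printf("vbus state re-read for irq %d\n", irq);
	return 0;
}

int main(void)
{
	int gpio_vbus_detection = 1;

	/* probe(): run the handler once so the boot-time cable state is
	 * latched through exactly the same path as a real interrupt */
	if (gpio_vbus_detection)
		vbus_irq_handler(42, NULL);
	return 0;
}
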
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index a850cbc48d8d..c49d50e68aee 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -174,6 +174,7 @@ static __init void reserve_regions(void)
174{ 174{
175 efi_memory_desc_t *md; 175 efi_memory_desc_t *md;
176 u64 paddr, npages, size; 176 u64 paddr, npages, size;
177 int resv;
177 178
178 if (efi_enabled(EFI_DBG)) 179 if (efi_enabled(EFI_DBG))
179 pr_info("Processing EFI memory map:\n"); 180 pr_info("Processing EFI memory map:\n");
@@ -190,12 +191,14 @@ static __init void reserve_regions(void)
190 paddr = md->phys_addr; 191 paddr = md->phys_addr;
191 npages = md->num_pages; 192 npages = md->num_pages;
192 193
194 resv = is_reserve_region(md);
193 if (efi_enabled(EFI_DBG)) { 195 if (efi_enabled(EFI_DBG)) {
194 char buf[64]; 196 char buf[64];
195 197
196 pr_info(" 0x%012llx-0x%012llx %s", 198 pr_info(" 0x%012llx-0x%012llx %s%s\n",
197 paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1, 199 paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1,
198 efi_md_typeattr_format(buf, sizeof(buf), md)); 200 efi_md_typeattr_format(buf, sizeof(buf), md),
201 resv ? "*" : "");
199 } 202 }
200 203
201 memrange_efi_to_native(&paddr, &npages); 204 memrange_efi_to_native(&paddr, &npages);
@@ -204,14 +207,9 @@ static __init void reserve_regions(void)
204 if (is_normal_ram(md)) 207 if (is_normal_ram(md))
205 early_init_dt_add_memory_arch(paddr, size); 208 early_init_dt_add_memory_arch(paddr, size);
206 209
207 if (is_reserve_region(md)) { 210 if (resv)
208 memblock_mark_nomap(paddr, size); 211 memblock_mark_nomap(paddr, size);
209 if (efi_enabled(EFI_DBG))
210 pr_cont("*");
211 }
212 212
213 if (efi_enabled(EFI_DBG))
214 pr_cont("\n");
215 } 213 }
216 214
217 set_bit(EFI_MEMMAP, &efi.flags); 215 set_bit(EFI_MEMMAP, &efi.flags);
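
The arm-init hunk caches is_reserve_region() in resv so one answer drives both the debug annotation and the nomap marking, and the old pr_cont() fragments collapse into a single pr_info() with an inline "*" suffix. A userspace sketch under those assumptions (mem_desc and the printf calls stand in for the EFI descriptor and kernel logging):

#include <stdio.h>
#include <stdbool.h>

/* illustrative stand-in for an EFI memory descriptor */
struct mem_desc {
	unsigned long long phys_addr;
	unsigned long long num_pages;
	bool reserved;
};

static bool is_reserve_region(const struct mem_desc *md)
{
	return md->reserved;
}

static void process_region(const struct mem_desc *md, bool debug)
{
	bool resv = is_reserve_region(md);   /* evaluated exactly once */

	if (debug)                           /* one line, "*" suffix inline */
		printf("0x%012llx +%llu pages%s\n",
		       md->phys_addr, md->num_pages, resv ? "*" : "");

	if (resv)
		printf("  -> memblock_mark_nomap()\n");
}

int main(void)
{
	struct mem_desc md = { 0x80000000ULL, 16, true };

	process_region(&md, true);
	return 0;
}
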
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 48da857f4774..cebcb405812e 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -33,6 +33,7 @@ config ARCH_REQUIRE_GPIOLIB
33 33
34menuconfig GPIOLIB 34menuconfig GPIOLIB
35 bool "GPIO Support" 35 bool "GPIO Support"
36 select ANON_INODES
36 help 37 help
37 This enables GPIO support through the generic GPIO library. 38 This enables GPIO support through the generic GPIO library.
38 You only need to enable this, if you also want to enable 39 You only need to enable this, if you also want to enable
@@ -530,7 +531,7 @@ menu "Port-mapped I/O GPIO drivers"
530 531
531config GPIO_104_DIO_48E 532config GPIO_104_DIO_48E
532 tristate "ACCES 104-DIO-48E GPIO support" 533 tristate "ACCES 104-DIO-48E GPIO support"
533 depends on ISA 534 depends on ISA_BUS_API
534 select GPIOLIB_IRQCHIP 535 select GPIOLIB_IRQCHIP
535 help 536 help
536 Enables GPIO support for the ACCES 104-DIO-48E series (104-DIO-48E, 537 Enables GPIO support for the ACCES 104-DIO-48E series (104-DIO-48E,
@@ -540,7 +541,7 @@ config GPIO_104_DIO_48E
540 541
541config GPIO_104_IDIO_16 542config GPIO_104_IDIO_16
542 tristate "ACCES 104-IDIO-16 GPIO support" 543 tristate "ACCES 104-IDIO-16 GPIO support"
543 depends on ISA 544 depends on ISA_BUS_API
544 select GPIOLIB_IRQCHIP 545 select GPIOLIB_IRQCHIP
545 help 546 help
546 Enables GPIO support for the ACCES 104-IDIO-16 family (104-IDIO-16, 547 Enables GPIO support for the ACCES 104-IDIO-16 family (104-IDIO-16,
@@ -551,7 +552,7 @@ config GPIO_104_IDIO_16
551 552
552config GPIO_104_IDI_48 553config GPIO_104_IDI_48
553 tristate "ACCES 104-IDI-48 GPIO support" 554 tristate "ACCES 104-IDI-48 GPIO support"
554 depends on ISA 555 depends on ISA_BUS_API
555 select GPIOLIB_IRQCHIP 556 select GPIOLIB_IRQCHIP
556 help 557 help
557 Enables GPIO support for the ACCES 104-IDI-48 family (104-IDI-48A, 558 Enables GPIO support for the ACCES 104-IDI-48 family (104-IDI-48A,
@@ -627,7 +628,7 @@ config GPIO_TS5500
627 628
628config GPIO_WS16C48 629config GPIO_WS16C48
629 tristate "WinSystems WS16C48 GPIO support" 630 tristate "WinSystems WS16C48 GPIO support"
630 depends on ISA 631 depends on ISA_BUS_API
631 select GPIOLIB_IRQCHIP 632 select GPIOLIB_IRQCHIP
632 help 633 help
633 Enables GPIO support for the WinSystems WS16C48. The base port 634 Enables GPIO support for the WinSystems WS16C48. The base port
diff --git a/drivers/gpio/gpio-104-dio-48e.c b/drivers/gpio/gpio-104-dio-48e.c
index 1a647c07be67..fcf776971ca9 100644
--- a/drivers/gpio/gpio-104-dio-48e.c
+++ b/drivers/gpio/gpio-104-dio-48e.c
@@ -75,7 +75,7 @@ static int dio48e_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
75{ 75{
76 struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip); 76 struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
77 const unsigned io_port = offset / 8; 77 const unsigned io_port = offset / 8;
78 const unsigned control_port = io_port / 2; 78 const unsigned int control_port = io_port / 3;
79 const unsigned control_addr = dio48egpio->base + 3 + control_port*4; 79 const unsigned control_addr = dio48egpio->base + 3 + control_port*4;
80 unsigned long flags; 80 unsigned long flags;
81 unsigned control; 81 unsigned control;
@@ -115,7 +115,7 @@ static int dio48e_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
115{ 115{
116 struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip); 116 struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
117 const unsigned io_port = offset / 8; 117 const unsigned io_port = offset / 8;
118 const unsigned control_port = io_port / 2; 118 const unsigned int control_port = io_port / 3;
119 const unsigned mask = BIT(offset % 8); 119 const unsigned mask = BIT(offset % 8);
120 const unsigned control_addr = dio48egpio->base + 3 + control_port*4; 120 const unsigned control_addr = dio48egpio->base + 3 + control_port*4;
121 const unsigned out_port = (io_port > 2) ? io_port + 1 : io_port; 121 const unsigned out_port = (io_port > 2) ? io_port + 1 : io_port;
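
The control_port fix reflects the board's layout: the 48 lines are six byte-wide I/O ports arranged as two 8255-style blocks of three ports each, every block with its own control register, hence io_port / 3 rather than / 2. A quick check of the corrected math (the base address is made up):

#include <stdio.h>

int main(void)
{
	unsigned base = 0x200;                    /* example base address */

	for (unsigned offset = 0; offset < 48; offset += 8) {
		unsigned io_port      = offset / 8;   /* 0..5 */
		unsigned control_port = io_port / 3;  /* block 0 or 1 (/2 was wrong) */
		unsigned control_addr = base + 3 + control_port * 4;

		printf("offset %2u -> port %u, control 0x%x\n",
		       offset, io_port, control_addr);
	}
	return 0;
}
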
diff --git a/drivers/gpio/gpio-104-idi-48.c b/drivers/gpio/gpio-104-idi-48.c
index 6c75c83baf5a..2d2763ea1a68 100644
--- a/drivers/gpio/gpio-104-idi-48.c
+++ b/drivers/gpio/gpio-104-idi-48.c
@@ -247,6 +247,7 @@ static int idi_48_probe(struct device *dev, unsigned int id)
247 idi48gpio->irq = irq[id]; 247 idi48gpio->irq = irq[id];
248 248
249 spin_lock_init(&idi48gpio->lock); 249 spin_lock_init(&idi48gpio->lock);
250 spin_lock_init(&idi48gpio->ack_lock);
250 251
251 dev_set_drvdata(dev, idi48gpio); 252 dev_set_drvdata(dev, idi48gpio);
252 253
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 9aabc48ff5de..953e4b829e32 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -547,11 +547,11 @@ static void bcm_kona_gpio_reset(struct bcm_kona_gpio *kona_gpio)
547 /* disable interrupts and clear status */ 547 /* disable interrupts and clear status */
548 for (i = 0; i < kona_gpio->num_bank; i++) { 548 for (i = 0; i < kona_gpio->num_bank; i++) {
549 /* Unlock the entire bank first */ 549 /* Unlock the entire bank first */
550 bcm_kona_gpio_write_lock_regs(kona_gpio, i, UNLOCK_CODE); 550 bcm_kona_gpio_write_lock_regs(reg_base, i, UNLOCK_CODE);
551 writel(0xffffffff, reg_base + GPIO_INT_MASK(i)); 551 writel(0xffffffff, reg_base + GPIO_INT_MASK(i));
552 writel(0xffffffff, reg_base + GPIO_INT_STATUS(i)); 552 writel(0xffffffff, reg_base + GPIO_INT_STATUS(i));
553 /* Now re-lock the bank */ 553 /* Now re-lock the bank */
554 bcm_kona_gpio_write_lock_regs(kona_gpio, i, LOCK_CODE); 554 bcm_kona_gpio_write_lock_regs(reg_base, i, LOCK_CODE);
555 } 555 }
556} 556}
557 557
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index d39014daeef9..fc5f197906ac 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -29,7 +29,6 @@
29 29
30#include <mach/hardware.h> 30#include <mach/hardware.h>
31#include <mach/platform.h> 31#include <mach/platform.h>
32#include <mach/irqs.h>
33 32
34#define LPC32XX_GPIO_P3_INP_STATE _GPREG(0x000) 33#define LPC32XX_GPIO_P3_INP_STATE _GPREG(0x000)
35#define LPC32XX_GPIO_P3_OUTP_SET _GPREG(0x004) 34#define LPC32XX_GPIO_P3_OUTP_SET _GPREG(0x004)
@@ -371,61 +370,16 @@ static int lpc32xx_gpio_request(struct gpio_chip *chip, unsigned pin)
371 370
372static int lpc32xx_gpio_to_irq_p01(struct gpio_chip *chip, unsigned offset) 371static int lpc32xx_gpio_to_irq_p01(struct gpio_chip *chip, unsigned offset)
373{ 372{
374 return IRQ_LPC32XX_P0_P1_IRQ; 373 return -ENXIO;
375} 374}
376 375
377static const char lpc32xx_gpio_to_irq_gpio_p3_table[] = {
378 IRQ_LPC32XX_GPIO_00,
379 IRQ_LPC32XX_GPIO_01,
380 IRQ_LPC32XX_GPIO_02,
381 IRQ_LPC32XX_GPIO_03,
382 IRQ_LPC32XX_GPIO_04,
383 IRQ_LPC32XX_GPIO_05,
384};
385
386static int lpc32xx_gpio_to_irq_gpio_p3(struct gpio_chip *chip, unsigned offset) 376static int lpc32xx_gpio_to_irq_gpio_p3(struct gpio_chip *chip, unsigned offset)
387{ 377{
388 if (offset < ARRAY_SIZE(lpc32xx_gpio_to_irq_gpio_p3_table))
389 return lpc32xx_gpio_to_irq_gpio_p3_table[offset];
390 return -ENXIO; 378 return -ENXIO;
391} 379}
392 380
393static const char lpc32xx_gpio_to_irq_gpi_p3_table[] = {
394 IRQ_LPC32XX_GPI_00,
395 IRQ_LPC32XX_GPI_01,
396 IRQ_LPC32XX_GPI_02,
397 IRQ_LPC32XX_GPI_03,
398 IRQ_LPC32XX_GPI_04,
399 IRQ_LPC32XX_GPI_05,
400 IRQ_LPC32XX_GPI_06,
401 IRQ_LPC32XX_GPI_07,
402 IRQ_LPC32XX_GPI_08,
403 IRQ_LPC32XX_GPI_09,
404 -ENXIO, /* 10 */
405 -ENXIO, /* 11 */
406 -ENXIO, /* 12 */
407 -ENXIO, /* 13 */
408 -ENXIO, /* 14 */
409 -ENXIO, /* 15 */
410 -ENXIO, /* 16 */
411 -ENXIO, /* 17 */
412 -ENXIO, /* 18 */
413 IRQ_LPC32XX_GPI_19,
414 -ENXIO, /* 20 */
415 -ENXIO, /* 21 */
416 -ENXIO, /* 22 */
417 -ENXIO, /* 23 */
418 -ENXIO, /* 24 */
419 -ENXIO, /* 25 */
420 -ENXIO, /* 26 */
421 -ENXIO, /* 27 */
422 IRQ_LPC32XX_GPI_28,
423};
424
425static int lpc32xx_gpio_to_irq_gpi_p3(struct gpio_chip *chip, unsigned offset) 381static int lpc32xx_gpio_to_irq_gpi_p3(struct gpio_chip *chip, unsigned offset)
426{ 382{
427 if (offset < ARRAY_SIZE(lpc32xx_gpio_to_irq_gpi_p3_table))
428 return lpc32xx_gpio_to_irq_gpi_p3_table[offset];
429 return -ENXIO; 383 return -ENXIO;
430} 384}
431 385
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index 75c6355b018d..e72794e463aa 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -709,7 +709,13 @@ static int zynq_gpio_probe(struct platform_device *pdev)
709 dev_err(&pdev->dev, "input clock not found.\n"); 709 dev_err(&pdev->dev, "input clock not found.\n");
710 return PTR_ERR(gpio->clk); 710 return PTR_ERR(gpio->clk);
711 } 711 }
712 ret = clk_prepare_enable(gpio->clk);
713 if (ret) {
714 dev_err(&pdev->dev, "Unable to enable clock.\n");
715 return ret;
716 }
712 717
718 pm_runtime_set_active(&pdev->dev);
713 pm_runtime_enable(&pdev->dev); 719 pm_runtime_enable(&pdev->dev);
714 ret = pm_runtime_get_sync(&pdev->dev); 720 ret = pm_runtime_get_sync(&pdev->dev);
715 if (ret < 0) 721 if (ret < 0)
@@ -747,6 +753,7 @@ err_pm_put:
747 pm_runtime_put(&pdev->dev); 753 pm_runtime_put(&pdev->dev);
748err_pm_dis: 754err_pm_dis:
749 pm_runtime_disable(&pdev->dev); 755 pm_runtime_disable(&pdev->dev);
756 clk_disable_unprepare(gpio->clk);
750 757
751 return ret; 758 return ret;
752} 759}
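
The zynq hunk enables the clock before touching runtime PM, marks the device active so the PM core's state matches reality, and unwinds with clk_disable_unprepare() on every failure path. A compilable sketch of that ordering, with stub functions in place of the real clk/PM APIs:

#include <stdio.h>

/* stand-ins for the clk and runtime-PM calls */
static int  clk_prepare_enable_stub(void)    { puts("clk on");  return 0; }
static void clk_disable_unprepare_stub(void) { puts("clk off"); }
static int  pm_runtime_get_sync_stub(void)   { return -1; /* force error path */ }

static int probe(void)
{
	int ret;

	ret = clk_prepare_enable_stub();     /* clock on before runtime PM */
	if (ret)
		return ret;                  /* nothing to unwind yet */

	/* pm_runtime_set_active() + pm_runtime_enable() would go here */
	ret = pm_runtime_get_sync_stub();
	if (ret < 0)
		goto err_clk;

	return 0;

err_clk:
	clk_disable_unprepare_stub();        /* new: the clock is not leaked */
	return ret;
}

int main(void)
{
	return probe() < 0 ? 1 : 0;
}
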
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index d22dcc38179d..4aabddb38b59 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -16,6 +16,7 @@
16#include <linux/errno.h> 16#include <linux/errno.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/io.h> 18#include <linux/io.h>
19#include <linux/io-mapping.h>
19#include <linux/gpio/consumer.h> 20#include <linux/gpio/consumer.h>
20#include <linux/of.h> 21#include <linux/of.h>
21#include <linux/of_address.h> 22#include <linux/of_address.h>
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index d407f904a31c..570771ed19e6 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -20,6 +20,7 @@
20#include <linux/cdev.h> 20#include <linux/cdev.h>
21#include <linux/fs.h> 21#include <linux/fs.h>
22#include <linux/uaccess.h> 22#include <linux/uaccess.h>
23#include <linux/compat.h>
23#include <uapi/linux/gpio.h> 24#include <uapi/linux/gpio.h>
24 25
25#include "gpiolib.h" 26#include "gpiolib.h"
@@ -316,7 +317,7 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
316{ 317{
317 struct gpio_device *gdev = filp->private_data; 318 struct gpio_device *gdev = filp->private_data;
318 struct gpio_chip *chip = gdev->chip; 319 struct gpio_chip *chip = gdev->chip;
319 int __user *ip = (int __user *)arg; 320 void __user *ip = (void __user *)arg;
320 321
321 /* We fail any subsequent ioctl():s when the chip is gone */ 322 /* We fail any subsequent ioctl():s when the chip is gone */
322 if (!chip) 323 if (!chip)
@@ -388,6 +389,14 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
388 return -EINVAL; 389 return -EINVAL;
389} 390}
390 391
392#ifdef CONFIG_COMPAT
393static long gpio_ioctl_compat(struct file *filp, unsigned int cmd,
394 unsigned long arg)
395{
396 return gpio_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
397}
398#endif
399
391/** 400/**
392 * gpio_chrdev_open() - open the chardev for ioctl operations 401 * gpio_chrdev_open() - open the chardev for ioctl operations
393 * @inode: inode for this chardev 402 * @inode: inode for this chardev
@@ -431,14 +440,15 @@ static const struct file_operations gpio_fileops = {
431 .owner = THIS_MODULE, 440 .owner = THIS_MODULE,
432 .llseek = noop_llseek, 441 .llseek = noop_llseek,
433 .unlocked_ioctl = gpio_ioctl, 442 .unlocked_ioctl = gpio_ioctl,
434 .compat_ioctl = gpio_ioctl, 443#ifdef CONFIG_COMPAT
444 .compat_ioctl = gpio_ioctl_compat,
445#endif
435}; 446};
436 447
437static void gpiodevice_release(struct device *dev) 448static void gpiodevice_release(struct device *dev)
438{ 449{
439 struct gpio_device *gdev = dev_get_drvdata(dev); 450 struct gpio_device *gdev = dev_get_drvdata(dev);
440 451
441 cdev_del(&gdev->chrdev);
442 list_del(&gdev->list); 452 list_del(&gdev->list);
443 ida_simple_remove(&gpio_ida, gdev->id); 453 ida_simple_remove(&gpio_ida, gdev->id);
444 kfree(gdev->label); 454 kfree(gdev->label);
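
The gpiolib chardev now treats the ioctl argument as a void __user * and, under CONFIG_COMPAT, routes 32-bit callers through compat_ptr() instead of reusing the native handler verbatim. A skeleton of that idiom -- the foo_* names are hypothetical, but the fops wiring mirrors the diff:

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/uaccess.h>

static long foo_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	void __user *ip = (void __user *)arg;  /* void * suits every payload */

	/* ... copy_from_user()/copy_to_user() against ip, per cmd ... */
	(void)ip;
	return -EINVAL;
}

#ifdef CONFIG_COMPAT
static long foo_ioctl_compat(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	/* widen the 32-bit user pointer, then reuse the native handler */
	return foo_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= foo_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= foo_ioctl_compat,    /* no raw handler reuse */
#endif
};
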
@@ -471,7 +481,6 @@ static int gpiochip_setup_dev(struct gpio_device *gdev)
471 481
472 /* From this point, the .release() function cleans up gpio_device */ 482 /* From this point, the .release() function cleans up gpio_device */
473 gdev->dev.release = gpiodevice_release; 483 gdev->dev.release = gpiodevice_release;
474 get_device(&gdev->dev);
475 pr_debug("%s: registered GPIOs %d to %d on device: %s (%s)\n", 484 pr_debug("%s: registered GPIOs %d to %d on device: %s (%s)\n",
476 __func__, gdev->base, gdev->base + gdev->ngpio - 1, 485 __func__, gdev->base, gdev->base + gdev->ngpio - 1,
477 dev_name(&gdev->dev), gdev->chip->label ? : "generic"); 486 dev_name(&gdev->dev), gdev->chip->label ? : "generic");
@@ -618,6 +627,8 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
618 goto err_free_label; 627 goto err_free_label;
619 } 628 }
620 629
630 spin_unlock_irqrestore(&gpio_lock, flags);
631
621 for (i = 0; i < chip->ngpio; i++) { 632 for (i = 0; i < chip->ngpio; i++) {
622 struct gpio_desc *desc = &gdev->descs[i]; 633 struct gpio_desc *desc = &gdev->descs[i];
623 634
@@ -649,8 +660,6 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
649 } 660 }
650 } 661 }
651 662
652 spin_unlock_irqrestore(&gpio_lock, flags);
653
654#ifdef CONFIG_PINCTRL 663#ifdef CONFIG_PINCTRL
655 INIT_LIST_HEAD(&gdev->pin_ranges); 664 INIT_LIST_HEAD(&gdev->pin_ranges);
656#endif 665#endif
@@ -759,6 +768,8 @@ void gpiochip_remove(struct gpio_chip *chip)
759 * be removed, else it will be dangling until the last user is 768 * be removed, else it will be dangling until the last user is
760 * gone. 769 * gone.
761 */ 770 */
771 cdev_del(&gdev->chrdev);
772 device_del(&gdev->dev);
762 put_device(&gdev->dev); 773 put_device(&gdev->dev);
763} 774}
764EXPORT_SYMBOL_GPL(gpiochip_remove); 775EXPORT_SYMBOL_GPL(gpiochip_remove);
@@ -858,7 +869,7 @@ struct gpio_chip *gpiochip_find(void *data,
858 869
859 spin_lock_irqsave(&gpio_lock, flags); 870 spin_lock_irqsave(&gpio_lock, flags);
860 list_for_each_entry(gdev, &gpio_devices, list) 871 list_for_each_entry(gdev, &gpio_devices, list)
861 if (match(gdev->chip, data)) 872 if (gdev->chip && match(gdev->chip, data))
862 break; 873 break;
863 874
864 /* No match? */ 875 /* No match? */
@@ -1356,11 +1367,18 @@ done:
1356/* 1367/*
1357 * This descriptor validation needs to be inserted verbatim into each 1368 * This descriptor validation needs to be inserted verbatim into each
1358 * function taking a descriptor, so we need to use a preprocessor 1369 * function taking a descriptor, so we need to use a preprocessor
1359 * macro to avoid endless duplication. 1370 * macro to avoid endless duplication. If the desc is NULL it is an
1371 * optional GPIO and calls should just bail out.
1360 */ 1372 */
1361#define VALIDATE_DESC(desc) do { \ 1373#define VALIDATE_DESC(desc) do { \
1362 if (!desc || !desc->gdev) { \ 1374 if (!desc) \
1363 pr_warn("%s: invalid GPIO\n", __func__); \ 1375 return 0; \
1376 if (IS_ERR(desc)) { \
1377 pr_warn("%s: invalid GPIO (errorpointer)\n", __func__); \
1378 return PTR_ERR(desc); \
1379 } \
1380 if (!desc->gdev) { \
1381 pr_warn("%s: invalid GPIO (no device)\n", __func__); \
1364 return -EINVAL; \ 1382 return -EINVAL; \
1365 } \ 1383 } \
1366 if ( !desc->gdev->chip ) { \ 1384 if ( !desc->gdev->chip ) { \
@@ -1370,8 +1388,14 @@ done:
1370 } } while (0) 1388 } } while (0)
1371 1389
1372#define VALIDATE_DESC_VOID(desc) do { \ 1390#define VALIDATE_DESC_VOID(desc) do { \
1373 if (!desc || !desc->gdev) { \ 1391 if (!desc) \
1374 pr_warn("%s: invalid GPIO\n", __func__); \ 1392 return; \
1393 if (IS_ERR(desc)) { \
1394 pr_warn("%s: invalid GPIO (errorpointer)\n", __func__); \
1395 return; \
1396 } \
1397 if (!desc->gdev) { \
1398 pr_warn("%s: invalid GPIO (no device)\n", __func__); \
1375 return; \ 1399 return; \
1376 } \ 1400 } \
1377 if (!desc->gdev->chip) { \ 1401 if (!desc->gdev->chip) { \
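
The reworked validation macros now distinguish three cases instead of issuing one blanket -EINVAL. A condensed model of the VALIDATE_DESC logic; CHECK_DESC and example_direction_input are illustrative names, but the NULL/ERR_PTR handling follows the diff:

#include <linux/err.h>
#include <linux/printk.h>

struct gpio_desc;

/* Condensed rules:
 *  - NULL: an optional GPIO that was never wired up; succeed silently
 *  - ERR_PTR: propagate the encoded error instead of a blanket -EINVAL
 *  - everything else is checked further (gdev, chip) as before
 */
#define CHECK_DESC(desc) do {						\
	if (!(desc))							\
		return 0;						\
	if (IS_ERR(desc)) {						\
		pr_warn("%s: invalid GPIO (errorpointer)\n", __func__);	\
		return PTR_ERR(desc);					\
	}								\
} while (0)

int example_direction_input(struct gpio_desc *desc)
{
	CHECK_DESC(desc);	/* NULL is a successful no-op */
	/* ... hardware access via desc->gdev->chip ... */
	return 0;
}
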
@@ -2040,7 +2064,14 @@ int gpiod_to_irq(const struct gpio_desc *desc)
2040 struct gpio_chip *chip; 2064 struct gpio_chip *chip;
2041 int offset; 2065 int offset;
2042 2066
2043 VALIDATE_DESC(desc); 2067 /*
2068 * Cannot VALIDATE_DESC() here as gpiod_to_irq() consumer semantics
2069 * requires this function to not return zero on an invalid descriptor
2070 * but rather a negative error number.
2071 */
2072 if (!desc || IS_ERR(desc) || !desc->gdev || !desc->gdev->chip)
2073 return -EINVAL;
2074
2044 chip = desc->gdev->chip; 2075 chip = desc->gdev->chip;
2045 offset = gpio_chip_hwgpio(desc); 2076 offset = gpio_chip_hwgpio(desc);
2046 if (chip->to_irq) { 2077 if (chip->to_irq) {
@@ -2066,17 +2097,30 @@ EXPORT_SYMBOL_GPL(gpiod_to_irq);
2066 */ 2097 */
2067int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset) 2098int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
2068{ 2099{
2069 if (offset >= chip->ngpio) 2100 struct gpio_desc *desc;
2070 return -EINVAL; 2101
2102 desc = gpiochip_get_desc(chip, offset);
2103 if (IS_ERR(desc))
2104 return PTR_ERR(desc);
2105
2106 /* Flush direction if something changed behind our back */
2107 if (chip->get_direction) {
2108 int dir = chip->get_direction(chip, offset);
2109
2110 if (dir)
2111 clear_bit(FLAG_IS_OUT, &desc->flags);
2112 else
2113 set_bit(FLAG_IS_OUT, &desc->flags);
2114 }
2071 2115
2072 if (test_bit(FLAG_IS_OUT, &chip->gpiodev->descs[offset].flags)) { 2116 if (test_bit(FLAG_IS_OUT, &desc->flags)) {
2073 chip_err(chip, 2117 chip_err(chip,
2074 "%s: tried to flag a GPIO set as output for IRQ\n", 2118 "%s: tried to flag a GPIO set as output for IRQ\n",
2075 __func__); 2119 __func__);
2076 return -EIO; 2120 return -EIO;
2077 } 2121 }
2078 2122
2079 set_bit(FLAG_USED_AS_IRQ, &chip->gpiodev->descs[offset].flags); 2123 set_bit(FLAG_USED_AS_IRQ, &desc->flags);
2080 return 0; 2124 return 0;
2081} 2125}
2082EXPORT_SYMBOL_GPL(gpiochip_lock_as_irq); 2126EXPORT_SYMBOL_GPL(gpiochip_lock_as_irq);
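
gpiochip_lock_as_irq() now re-reads the direction from hardware, when the chip can report it, before rejecting an output line, so a FLAG_IS_OUT flag gone stale behind gpiolib's back no longer blocks a valid IRQ request. A runnable toy with a deliberately stale cache; struct chip and its fields are stand-ins:

#include <stdio.h>
#include <stdbool.h>

struct chip {
	/* returns 1 for input, 0 for output; NULL if unsupported */
	int (*get_direction)(struct chip *chip, unsigned offset);
	bool cached_is_out[8];
};

static int lock_as_irq(struct chip *chip, unsigned offset)
{
	/* flush direction if something changed behind our back */
	if (chip->get_direction)
		chip->cached_is_out[offset] = !chip->get_direction(chip, offset);

	if (chip->cached_is_out[offset]) {
		fprintf(stderr, "refusing IRQ on an output line\n");
		return -1;
	}
	return 0;		/* FLAG_USED_AS_IRQ is set in the real code */
}

static int always_input(struct chip *c, unsigned o) { return 1; }

int main(void)
{
	struct chip c = { .get_direction = always_input,
			  .cached_is_out = { [3] = true } };  /* stale cache */

	return lock_as_irq(&c, 3);   /* succeeds: hardware says input */
}
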
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index be43afb08c69..e3dba6f44a79 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -8,7 +8,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
8 drm_lock.o drm_memory.o drm_drv.o drm_vm.o \ 8 drm_lock.o drm_memory.o drm_drv.o drm_vm.o \
9 drm_scatter.o drm_pci.o \ 9 drm_scatter.o drm_pci.o \
10 drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \ 10 drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
11 drm_crtc.o drm_modes.o drm_edid.o \ 11 drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \
12 drm_info.o drm_debugfs.o drm_encoder_slave.o \ 12 drm_info.o drm_debugfs.o drm_encoder_slave.o \
13 drm_trace_points.o drm_global.o drm_prime.o \ 13 drm_trace_points.o drm_global.o drm_prime.o \
14 drm_rect.o drm_vma_manager.o drm_flip_work.o \ 14 drm_rect.o drm_vma_manager.o drm_flip_work.o \
@@ -23,7 +23,8 @@ drm-$(CONFIG_AGP) += drm_agpsupport.o
23 23
24drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \ 24drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
25 drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \ 25 drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
26 drm_kms_helper_common.o drm_dp_dual_mode_helper.o 26 drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
27 drm_simple_kms_helper.o
27 28
28drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o 29drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
29drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o 30drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 992f00b65be4..ac8e02f9105a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1820,6 +1820,8 @@ struct amdgpu_asic_funcs {
1820 /* MM block clocks */ 1820 /* MM block clocks */
1821 int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk); 1821 int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
1822 int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk); 1822 int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
1823 /* query virtual capabilities */
1824 u32 (*get_virtual_caps)(struct amdgpu_device *adev);
1823}; 1825};
1824 1826
1825/* 1827/*
@@ -1914,8 +1916,12 @@ void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);
1914 1916
1915 1917
1916/* GPU virtualization */ 1918/* GPU virtualization */
1919#define AMDGPU_VIRT_CAPS_SRIOV_EN (1 << 0)
1920#define AMDGPU_VIRT_CAPS_IS_VF (1 << 1)
1917struct amdgpu_virtualization { 1921struct amdgpu_virtualization {
1918 bool supports_sr_iov; 1922 bool supports_sr_iov;
1923 bool is_virtual;
1924 u32 caps;
1919}; 1925};
1920 1926
1921/* 1927/*
@@ -2032,7 +2038,7 @@ struct amdgpu_device {
2032 struct amdgpu_irq_src hpd_irq; 2038 struct amdgpu_irq_src hpd_irq;
2033 2039
2034 /* rings */ 2040 /* rings */
2035 unsigned fence_context; 2041 u64 fence_context;
2036 unsigned num_rings; 2042 unsigned num_rings;
2037 struct amdgpu_ring *rings[AMDGPU_MAX_RINGS]; 2043 struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
2038 bool ib_pool_ready; 2044 bool ib_pool_ready;
@@ -2204,6 +2210,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
2204#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev)) 2210#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
2205#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d)) 2211#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
2206#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec)) 2212#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
2213#define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev)))
2207#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev)) 2214#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
2208#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev)) 2215#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
2209#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l)) 2216#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 199f76baf22c..cf6f49fc1c75 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -696,6 +696,17 @@ static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
696 return result; 696 return result;
697} 697}
698 698
699static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type)
700{
701 CGS_FUNC_ADEV;
702 if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) {
703 release_firmware(adev->pm.fw);
704 return 0;
705 }
706 /* cannot release other firmware because they are not created by cgs */
707 return -EINVAL;
708}
709
699static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, 710static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
700 enum cgs_ucode_id type, 711 enum cgs_ucode_id type,
701 struct cgs_firmware_info *info) 712 struct cgs_firmware_info *info)
@@ -898,7 +909,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
898 struct cgs_acpi_method_argument *argument = NULL; 909 struct cgs_acpi_method_argument *argument = NULL;
899 uint32_t i, count; 910 uint32_t i, count;
900 acpi_status status; 911 acpi_status status;
901 int result; 912 int result = 0;
902 uint32_t func_no = 0xFFFFFFFF; 913 uint32_t func_no = 0xFFFFFFFF;
903 914
904 handle = ACPI_HANDLE(&adev->pdev->dev); 915 handle = ACPI_HANDLE(&adev->pdev->dev);
@@ -1125,6 +1136,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
1125 amdgpu_cgs_pm_query_clock_limits, 1136 amdgpu_cgs_pm_query_clock_limits,
1126 amdgpu_cgs_set_camera_voltages, 1137 amdgpu_cgs_set_camera_voltages,
1127 amdgpu_cgs_get_firmware_info, 1138 amdgpu_cgs_get_firmware_info,
1139 amdgpu_cgs_rel_firmware,
1128 amdgpu_cgs_set_powergating_state, 1140 amdgpu_cgs_set_powergating_state,
1129 amdgpu_cgs_set_clockgating_state, 1141 amdgpu_cgs_set_clockgating_state,
1130 amdgpu_cgs_get_active_displays_info, 1142 amdgpu_cgs_get_active_displays_info,
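
The new amdgpu_cgs_rel_firmware() frees only the SMU images that CGS requested itself and returns -EINVAL for every other type, since that firmware is owned by other code. A toy encoding of the ownership rule; the names are illustrative:

#include <errno.h>
#include <stdio.h>

enum ucode_id { UCODE_SMU, UCODE_SMU_SK, UCODE_OTHER };

static int rel_firmware(enum ucode_id type)
{
	if (type == UCODE_SMU || type == UCODE_SMU_SK) {
		puts("release_firmware(adev->pm.fw)");
		return 0;
	}
	return -EINVAL;	/* not created by cgs; caller keeps ownership */
}

int main(void)
{
	return rel_firmware(UCODE_OTHER) == -EINVAL ? 0 : 1;
}
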
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index bb8b149786d7..6e920086af46 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -827,8 +827,10 @@ static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
827 */ 827 */
828static void amdgpu_atombios_fini(struct amdgpu_device *adev) 828static void amdgpu_atombios_fini(struct amdgpu_device *adev)
829{ 829{
830 if (adev->mode_info.atom_context) 830 if (adev->mode_info.atom_context) {
831 kfree(adev->mode_info.atom_context->scratch); 831 kfree(adev->mode_info.atom_context->scratch);
832 kfree(adev->mode_info.atom_context->iio);
833 }
832 kfree(adev->mode_info.atom_context); 834 kfree(adev->mode_info.atom_context);
833 adev->mode_info.atom_context = NULL; 835 adev->mode_info.atom_context = NULL;
834 kfree(adev->mode_info.atom_card_info); 836 kfree(adev->mode_info.atom_card_info);
@@ -1325,6 +1327,11 @@ static int amdgpu_fini(struct amdgpu_device *adev)
1325 adev->ip_block_status[i].valid = false; 1327 adev->ip_block_status[i].valid = false;
1326 } 1328 }
1327 1329
1330 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1331 if (adev->ip_blocks[i].funcs->late_fini)
1332 adev->ip_blocks[i].funcs->late_fini((void *)adev);
1333 }
1334
1328 return 0; 1335 return 0;
1329} 1336}
1330 1337
@@ -1378,6 +1385,15 @@ static int amdgpu_resume(struct amdgpu_device *adev)
1378 return 0; 1385 return 0;
1379} 1386}
1380 1387
1388static bool amdgpu_device_is_virtual(void)
1389{
1390#ifdef CONFIG_X86
1391 return boot_cpu_has(X86_FEATURE_HYPERVISOR);
1392#else
1393 return false;
1394#endif
1395}
1396
1381/** 1397/**
1382 * amdgpu_device_init - initialize the driver 1398 * amdgpu_device_init - initialize the driver
1383 * 1399 *
@@ -1512,9 +1528,14 @@ int amdgpu_device_init(struct amdgpu_device *adev,
1512 adev->virtualization.supports_sr_iov = 1528 adev->virtualization.supports_sr_iov =
1513 amdgpu_atombios_has_gpu_virtualization_table(adev); 1529 amdgpu_atombios_has_gpu_virtualization_table(adev);
1514 1530
1531 /* Check if we are executing in a virtualized environment */
1532 adev->virtualization.is_virtual = amdgpu_device_is_virtual();
1533 adev->virtualization.caps = amdgpu_asic_get_virtual_caps(adev);
1534
1515 /* Post card if necessary */ 1535 /* Post card if necessary */
1516 if (!amdgpu_card_posted(adev) || 1536 if (!amdgpu_card_posted(adev) ||
1517 adev->virtualization.supports_sr_iov) { 1537 (adev->virtualization.is_virtual &&
1538 !(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) {
1518 if (!adev->bios) { 1539 if (!adev->bios) {
1519 dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n"); 1540 dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
1520 return -EINVAL; 1541 return -EINVAL;
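
Taken together, the amdgpu_device.c hunks change the posting rule: a hypervisor is detected via X86_FEATURE_HYPERVISOR, per-ASIC virtualization caps are queried, and the card is (re)posted when running virtualized without SR-IOV enabled, rather than whenever an ATOM virtualization table merely exists. A sketch of that decision with stub detection functions; the flag values mirror the header hunk above:

#include <stdio.h>
#include <stdbool.h>

#define VIRT_CAPS_SRIOV_EN (1u << 0)
#define VIRT_CAPS_IS_VF    (1u << 1)

static bool is_virtual_env(void)
{
	return true;	/* boot_cpu_has(X86_FEATURE_HYPERVISOR) on x86 */
}

static unsigned get_virtual_caps(void)
{
	return 0;	/* per-ASIC hook; CIK reports no SR-IOV support */
}

int main(void)
{
	bool card_posted = false;
	bool is_virtual  = is_virtual_env();
	unsigned caps    = get_virtual_caps();

	if (!card_posted || (is_virtual && !(caps & VIRT_CAPS_SRIOV_EN)))
		puts("posting card via BIOS tables");
	return 0;
}
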
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index b0832da2ef7e..a6eecf6f9065 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -240,7 +240,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
240 240
241 work->base = base; 241 work->base = base;
242 242
243 r = drm_vblank_get(crtc->dev, amdgpu_crtc->crtc_id); 243 r = drm_crtc_vblank_get(crtc);
244 if (r) { 244 if (r) {
245 DRM_ERROR("failed to get vblank before flip\n"); 245 DRM_ERROR("failed to get vblank before flip\n");
246 goto pflip_cleanup; 246 goto pflip_cleanup;
@@ -268,7 +268,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
268 return 0; 268 return 0;
269 269
270vblank_cleanup: 270vblank_cleanup:
271 drm_vblank_put(crtc->dev, amdgpu_crtc->crtc_id); 271 drm_crtc_vblank_put(crtc);
272 272
273pflip_cleanup: 273pflip_cleanup:
274 if (unlikely(amdgpu_bo_reserve(new_rbo, false) != 0)) { 274 if (unlikely(amdgpu_bo_reserve(new_rbo, false) != 0)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 40a23704a981..d851ea15059f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -447,7 +447,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
447 dev_info.max_memory_clock = adev->pm.default_mclk * 10; 447 dev_info.max_memory_clock = adev->pm.default_mclk * 10;
448 } 448 }
449 dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask; 449 dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
450 dev_info.num_rb_pipes = adev->gfx.config.num_rbs; 450 dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
451 adev->gfx.config.max_shader_engines;
451 dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts; 452 dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
452 dev_info._pad = 0; 453 dev_info._pad = 0;
453 dev_info.ids_flags = 0; 454 dev_info.ids_flags = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 589b36e8c5cf..0e13d80d2a95 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -270,30 +270,28 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
270 struct drm_device *ddev = dev_get_drvdata(dev); 270 struct drm_device *ddev = dev_get_drvdata(dev);
271 struct amdgpu_device *adev = ddev->dev_private; 271 struct amdgpu_device *adev = ddev->dev_private;
272 enum amd_pm_state_type state = 0; 272 enum amd_pm_state_type state = 0;
273 long idx; 273 unsigned long idx;
274 int ret; 274 int ret;
275 275
276 if (strlen(buf) == 1) 276 if (strlen(buf) == 1)
277 adev->pp_force_state_enabled = false; 277 adev->pp_force_state_enabled = false;
278 else { 278 else if (adev->pp_enabled) {
279 ret = kstrtol(buf, 0, &idx); 279 struct pp_states_info data;
280 280
281 if (ret) { 281 ret = kstrtoul(buf, 0, &idx);
282 if (ret || idx >= ARRAY_SIZE(data.states)) {
282 count = -EINVAL; 283 count = -EINVAL;
283 goto fail; 284 goto fail;
284 } 285 }
285 286
286 if (adev->pp_enabled) { 287 amdgpu_dpm_get_pp_num_states(adev, &data);
287 struct pp_states_info data; 288 state = data.states[idx];
288 amdgpu_dpm_get_pp_num_states(adev, &data); 289 /* only set user selected power states */
289 state = data.states[idx]; 290 if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
290 /* only set user selected power states */ 291 state != POWER_STATE_TYPE_DEFAULT) {
291 if (state != POWER_STATE_TYPE_INTERNAL_BOOT && 292 amdgpu_dpm_dispatch_task(adev,
292 state != POWER_STATE_TYPE_DEFAULT) { 293 AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
293 amdgpu_dpm_dispatch_task(adev, 294 adev->pp_force_state_enabled = true;
294 AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
295 adev->pp_force_state_enabled = true;
296 }
297 } 295 }
298 } 296 }
299fail: 297fail:
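
The sysfs handler now parses with kstrtoul() and bounds-checks the index against the state table before dereferencing it, closing the hole where a negative or oversized index reached data.states[idx]. A userspace equivalent using strtoul(); NSTATES stands in for ARRAY_SIZE(data.states):

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

#define NSTATES 16                       /* ARRAY_SIZE(data.states) stand-in */

int main(int argc, char **argv)
{
	int states[NSTATES] = { 0 };
	unsigned long idx;
	char *end;

	if (argc < 2)
		return 1;

	errno = 0;
	idx = strtoul(argv[1], &end, 0); /* kstrtoul() in the kernel */
	if (errno || *end || idx >= NSTATES)
		return 1;                /* -EINVAL: reject before indexing */

	printf("forcing state %d\n", states[idx]);
	return 0;
}
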
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index 6bd961fb43dc..82256558e0f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -183,13 +183,6 @@ static int amdgpu_pp_sw_fini(void *handle)
183 if (ret) 183 if (ret)
184 return ret; 184 return ret;
185 185
186#ifdef CONFIG_DRM_AMD_POWERPLAY
187 if (adev->pp_enabled) {
188 amdgpu_pm_sysfs_fini(adev);
189 amd_powerplay_fini(adev->powerplay.pp_handle);
190 }
191#endif
192
193 return ret; 186 return ret;
194} 187}
195 188
@@ -223,6 +216,22 @@ static int amdgpu_pp_hw_fini(void *handle)
223 return ret; 216 return ret;
224} 217}
225 218
219static void amdgpu_pp_late_fini(void *handle)
220{
221#ifdef CONFIG_DRM_AMD_POWERPLAY
222 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
223
224 if (adev->pp_enabled) {
225 amdgpu_pm_sysfs_fini(adev);
226 amd_powerplay_fini(adev->powerplay.pp_handle);
227 }
228
229 if (adev->powerplay.ip_funcs->late_fini)
230 adev->powerplay.ip_funcs->late_fini(
231 adev->powerplay.pp_handle);
232#endif
233}
234
226static int amdgpu_pp_suspend(void *handle) 235static int amdgpu_pp_suspend(void *handle)
227{ 236{
228 int ret = 0; 237 int ret = 0;
@@ -311,6 +320,7 @@ const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
311 .sw_fini = amdgpu_pp_sw_fini, 320 .sw_fini = amdgpu_pp_sw_fini,
312 .hw_init = amdgpu_pp_hw_init, 321 .hw_init = amdgpu_pp_hw_init,
313 .hw_fini = amdgpu_pp_hw_fini, 322 .hw_fini = amdgpu_pp_hw_fini,
323 .late_fini = amdgpu_pp_late_fini,
314 .suspend = amdgpu_pp_suspend, 324 .suspend = amdgpu_pp_suspend,
315 .resume = amdgpu_pp_resume, 325 .resume = amdgpu_pp_resume,
316 .is_idle = amdgpu_pp_is_idle, 326 .is_idle = amdgpu_pp_is_idle,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 3b02272db678..870f9494252c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -343,6 +343,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
343 ring->ring = NULL; 343 ring->ring = NULL;
344 ring->ring_obj = NULL; 344 ring->ring_obj = NULL;
345 345
346 amdgpu_wb_free(ring->adev, ring->cond_exe_offs);
346 amdgpu_wb_free(ring->adev, ring->fence_offs); 347 amdgpu_wb_free(ring->adev, ring->fence_offs);
347 amdgpu_wb_free(ring->adev, ring->rptr_offs); 348 amdgpu_wb_free(ring->adev, ring->rptr_offs);
348 amdgpu_wb_free(ring->adev, ring->wptr_offs); 349 amdgpu_wb_free(ring->adev, ring->wptr_offs);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 8bf84efafb04..d8af37a845f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -115,6 +115,7 @@ int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
115 return r; 115 return r;
116 } 116 }
117 r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr); 117 r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
118 memset(sa_manager->cpu_ptr, 0, sa_manager->size);
118 amdgpu_bo_unreserve(sa_manager->bo); 119 amdgpu_bo_unreserve(sa_manager->bo);
119 return r; 120 return r;
120} 121}
@@ -427,7 +428,7 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
427 soffset, eoffset, eoffset - soffset); 428 soffset, eoffset, eoffset - soffset);
428 429
429 if (i->fence) 430 if (i->fence)
430 seq_printf(m, " protected by 0x%08x on context %d", 431 seq_printf(m, " protected by 0x%08x on context %llu",
431 i->fence->seqno, i->fence->context); 432 i->fence->seqno, i->fence->context);
432 433
433 seq_printf(m, "\n"); 434 seq_printf(m, "\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 01abfc21b4a2..e19520c4b4b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -253,19 +253,20 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
253{ 253{
254 int r; 254 int r;
255 255
256 if (adev->uvd.vcpu_bo == NULL) 256 kfree(adev->uvd.saved_bo);
257 return 0;
258 257
259 amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity); 258 amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
260 259
261 r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false); 260 if (adev->uvd.vcpu_bo) {
262 if (!r) { 261 r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
263 amdgpu_bo_kunmap(adev->uvd.vcpu_bo); 262 if (!r) {
264 amdgpu_bo_unpin(adev->uvd.vcpu_bo); 263 amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
265 amdgpu_bo_unreserve(adev->uvd.vcpu_bo); 264 amdgpu_bo_unpin(adev->uvd.vcpu_bo);
266 } 265 amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
266 }
267 267
268 amdgpu_bo_unref(&adev->uvd.vcpu_bo); 268 amdgpu_bo_unref(&adev->uvd.vcpu_bo);
269 }
269 270
270 amdgpu_ring_fini(&adev->uvd.ring); 271 amdgpu_ring_fini(&adev->uvd.ring);
271 272
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index ea407db1fbcf..5ec1f1e9c983 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -6221,6 +6221,9 @@ static int ci_dpm_sw_fini(void *handle)
6221 ci_dpm_fini(adev); 6221 ci_dpm_fini(adev);
6222 mutex_unlock(&adev->pm.mutex); 6222 mutex_unlock(&adev->pm.mutex);
6223 6223
6224 release_firmware(adev->pm.fw);
6225 adev->pm.fw = NULL;
6226
6224 return 0; 6227 return 0;
6225} 6228}
6226 6229
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 07bc795a4ca9..910431808542 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -962,6 +962,12 @@ static bool cik_read_bios_from_rom(struct amdgpu_device *adev,
962 return true; 962 return true;
963} 963}
964 964
965static u32 cik_get_virtual_caps(struct amdgpu_device *adev)
966{
967 /* CIK does not support SR-IOV */
968 return 0;
969}
970
965static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = { 971static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
966 {mmGRBM_STATUS, false}, 972 {mmGRBM_STATUS, false},
967 {mmGB_ADDR_CONFIG, false}, 973 {mmGB_ADDR_CONFIG, false},
@@ -2007,6 +2013,7 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
2007 .get_xclk = &cik_get_xclk, 2013 .get_xclk = &cik_get_xclk,
2008 .set_uvd_clocks = &cik_set_uvd_clocks, 2014 .set_uvd_clocks = &cik_set_uvd_clocks,
2009 .set_vce_clocks = &cik_set_vce_clocks, 2015 .set_vce_clocks = &cik_set_vce_clocks,
2016 .get_virtual_caps = &cik_get_virtual_caps,
2010 /* these should be moved to their own ip modules */ 2017 /* these should be moved to their own ip modules */
2011 .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter, 2018 .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
2012 .wait_for_mc_idle = &gmc_v7_0_mc_wait_for_idle, 2019 .wait_for_mc_idle = &gmc_v7_0_mc_wait_for_idle,
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 518dca43b133..9dc4e24e31e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -66,6 +66,16 @@ MODULE_FIRMWARE("radeon/mullins_sdma1.bin");
66 66
67u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev); 67u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
68 68
69
70static void cik_sdma_free_microcode(struct amdgpu_device *adev)
71{
72 int i;
73 for (i = 0; i < adev->sdma.num_instances; i++) {
74 release_firmware(adev->sdma.instance[i].fw);
75 adev->sdma.instance[i].fw = NULL;
76 }
77}
78
69/* 79/*
70 * sDMA - System DMA 80 * sDMA - System DMA
71 * Starting with CIK, the GPU has new asynchronous 81 * Starting with CIK, the GPU has new asynchronous
@@ -419,6 +429,8 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
419 /* Initialize the ring buffer's read and write pointers */ 429 /* Initialize the ring buffer's read and write pointers */
420 WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); 430 WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
421 WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); 431 WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
432 WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
433 WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
422 434
423 /* set the wb address whether it's enabled or not */ 435 /* set the wb address whether it's enabled or not */
424 WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], 436 WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
@@ -446,7 +458,12 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
446 WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); 458 WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
447 459
448 ring->ready = true; 460 ring->ready = true;
461 }
462
463 cik_sdma_enable(adev, true);
449 464
465 for (i = 0; i < adev->sdma.num_instances; i++) {
466 ring = &adev->sdma.instance[i].ring;
450 r = amdgpu_ring_test_ring(ring); 467 r = amdgpu_ring_test_ring(ring);
451 if (r) { 468 if (r) {
452 ring->ready = false; 469 ring->ready = false;
@@ -529,8 +546,8 @@ static int cik_sdma_start(struct amdgpu_device *adev)
529 if (r) 546 if (r)
530 return r; 547 return r;
531 548
 532 /* unhalt the MEs */ 549 /* halt the engine before programming */
533 cik_sdma_enable(adev, true); 550 cik_sdma_enable(adev, false);
534 551
535 /* start the gfx rings and rlc compute queues */ 552 /* start the gfx rings and rlc compute queues */
536 r = cik_sdma_gfx_resume(adev); 553 r = cik_sdma_gfx_resume(adev);
@@ -998,6 +1015,7 @@ static int cik_sdma_sw_fini(void *handle)
998 for (i = 0; i < adev->sdma.num_instances; i++) 1015 for (i = 0; i < adev->sdma.num_instances; i++)
999 amdgpu_ring_fini(&adev->sdma.instance[i].ring); 1016 amdgpu_ring_fini(&adev->sdma.instance[i].ring);
1000 1017
1018 cik_sdma_free_microcode(adev);
1001 return 0; 1019 return 0;
1002} 1020}
1003 1021
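
The cik_sdma hunks reorder bring-up: halt the engine, program every ring (now also zeroing the IB rptr/offset registers), enable the engine once, and only then run the ring tests. The sequence in miniature, with print stubs standing in for the register writes:

#include <stdio.h>

#define NR_INSTANCES 2

static void program_ring(int i)   { printf("program ring %d (halted)\n", i); }
static void engine_enable(int on) { printf("engine %s\n", on ? "on" : "off"); }
static int  test_ring(int i)      { printf("test ring %d\n", i); return 0; }

int main(void)
{
	int i;

	engine_enable(0);			/* halt before programming */
	for (i = 0; i < NR_INSTANCES; i++)
		program_ring(i);
	engine_enable(1);			/* single enable for all rings */
	for (i = 0; i < NR_INSTANCES; i++)
		if (test_ring(i))
			return 1;
	return 0;
}
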
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 8227344d2ff6..c1b04e9aab57 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2667,19 +2667,21 @@ static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
2667 } 2667 }
2668} 2668}
2669 2669
2670static void dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 2670static int dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2671 u16 *blue, uint32_t start, uint32_t size) 2671 u16 *blue, uint32_t size)
2672{ 2672{
2673 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2673 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2674 int end = (start + size > 256) ? 256 : start + size, i; 2674 int i;
2675 2675
2676 /* userspace palettes are always correct as is */ 2676 /* userspace palettes are always correct as is */
2677 for (i = start; i < end; i++) { 2677 for (i = 0; i < size; i++) {
2678 amdgpu_crtc->lut_r[i] = red[i] >> 6; 2678 amdgpu_crtc->lut_r[i] = red[i] >> 6;
2679 amdgpu_crtc->lut_g[i] = green[i] >> 6; 2679 amdgpu_crtc->lut_g[i] = green[i] >> 6;
2680 amdgpu_crtc->lut_b[i] = blue[i] >> 6; 2680 amdgpu_crtc->lut_b[i] = blue[i] >> 6;
2681 } 2681 }
2682 dce_v10_0_crtc_load_lut(crtc); 2682 dce_v10_0_crtc_load_lut(crtc);
2683
2684 return 0;
2683} 2685}
2684 2686
2685static void dce_v10_0_crtc_destroy(struct drm_crtc *crtc) 2687static void dce_v10_0_crtc_destroy(struct drm_crtc *crtc)
@@ -2717,13 +2719,13 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2717 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); 2719 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
2718 amdgpu_irq_update(adev, &adev->crtc_irq, type); 2720 amdgpu_irq_update(adev, &adev->crtc_irq, type);
2719 amdgpu_irq_update(adev, &adev->pageflip_irq, type); 2721 amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2720 drm_vblank_on(dev, amdgpu_crtc->crtc_id); 2722 drm_crtc_vblank_on(crtc);
2721 dce_v10_0_crtc_load_lut(crtc); 2723 dce_v10_0_crtc_load_lut(crtc);
2722 break; 2724 break;
2723 case DRM_MODE_DPMS_STANDBY: 2725 case DRM_MODE_DPMS_STANDBY:
2724 case DRM_MODE_DPMS_SUSPEND: 2726 case DRM_MODE_DPMS_SUSPEND:
2725 case DRM_MODE_DPMS_OFF: 2727 case DRM_MODE_DPMS_OFF:
2726 drm_vblank_off(dev, amdgpu_crtc->crtc_id); 2728 drm_crtc_vblank_off(crtc);
2727 if (amdgpu_crtc->enabled) { 2729 if (amdgpu_crtc->enabled) {
2728 dce_v10_0_vga_enable(crtc, true); 2730 dce_v10_0_vga_enable(crtc, true);
2729 amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE); 2731 amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
@@ -3372,7 +3374,7 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
3372 3374
3373 spin_unlock_irqrestore(&adev->ddev->event_lock, flags); 3375 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3374 3376
3375 drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); 3377 drm_crtc_vblank_put(&amdgpu_crtc->base);
3376 schedule_work(&works->unpin_work); 3378 schedule_work(&works->unpin_work);
3377 3379
3378 return 0; 3380 return 0;
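
This converts the DCE 10 gamma hook to the DRM core's new prototype: the start offset is gone, the callback updates the full size-entry LUT, and it returns an int; the identical conversion repeats below for DCE 11 and DCE 8. A minimal model of the new hook shape (the >> 6 truncation matches the 10-bit hardware LUT; names are illustrative):

#include <stdint.h>
#include <stdio.h>

static uint16_t lut_r[256], lut_g[256], lut_b[256];

static int crtc_gamma_set(uint16_t *red, uint16_t *green, uint16_t *blue,
			  uint32_t size)
{
	uint32_t i;

	for (i = 0; i < size; i++) {	/* no more partial [start, end) window */
		lut_r[i] = red[i] >> 6;
		lut_g[i] = green[i] >> 6;
		lut_b[i] = blue[i] >> 6;
	}
	/* load_lut(crtc) in the real driver */
	return 0;
}

int main(void)
{
	uint16_t ramp[256];

	for (int i = 0; i < 256; i++)
		ramp[i] = (uint16_t)(i << 8);	/* linear test ramp */
	return crtc_gamma_set(ramp, ramp, ramp, 256);
}
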
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index af26ec0bc59d..c90408bc0fde 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2678,19 +2678,21 @@ static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
2678 } 2678 }
2679} 2679}
2680 2680
2681static void dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 2681static int dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2682 u16 *blue, uint32_t start, uint32_t size) 2682 u16 *blue, uint32_t size)
2683{ 2683{
2684 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2684 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2685 int end = (start + size > 256) ? 256 : start + size, i; 2685 int i;
2686 2686
2687 /* userspace palettes are always correct as is */ 2687 /* userspace palettes are always correct as is */
2688 for (i = start; i < end; i++) { 2688 for (i = 0; i < size; i++) {
2689 amdgpu_crtc->lut_r[i] = red[i] >> 6; 2689 amdgpu_crtc->lut_r[i] = red[i] >> 6;
2690 amdgpu_crtc->lut_g[i] = green[i] >> 6; 2690 amdgpu_crtc->lut_g[i] = green[i] >> 6;
2691 amdgpu_crtc->lut_b[i] = blue[i] >> 6; 2691 amdgpu_crtc->lut_b[i] = blue[i] >> 6;
2692 } 2692 }
2693 dce_v11_0_crtc_load_lut(crtc); 2693 dce_v11_0_crtc_load_lut(crtc);
2694
2695 return 0;
2694} 2696}
2695 2697
2696static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc) 2698static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc)
@@ -2728,13 +2730,13 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2728 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); 2730 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
2729 amdgpu_irq_update(adev, &adev->crtc_irq, type); 2731 amdgpu_irq_update(adev, &adev->crtc_irq, type);
2730 amdgpu_irq_update(adev, &adev->pageflip_irq, type); 2732 amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2731 drm_vblank_on(dev, amdgpu_crtc->crtc_id); 2733 drm_crtc_vblank_on(crtc);
2732 dce_v11_0_crtc_load_lut(crtc); 2734 dce_v11_0_crtc_load_lut(crtc);
2733 break; 2735 break;
2734 case DRM_MODE_DPMS_STANDBY: 2736 case DRM_MODE_DPMS_STANDBY:
2735 case DRM_MODE_DPMS_SUSPEND: 2737 case DRM_MODE_DPMS_SUSPEND:
2736 case DRM_MODE_DPMS_OFF: 2738 case DRM_MODE_DPMS_OFF:
2737 drm_vblank_off(dev, amdgpu_crtc->crtc_id); 2739 drm_crtc_vblank_off(crtc);
2738 if (amdgpu_crtc->enabled) { 2740 if (amdgpu_crtc->enabled) {
2739 dce_v11_0_vga_enable(crtc, true); 2741 dce_v11_0_vga_enable(crtc, true);
2740 amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE); 2742 amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
@@ -3433,7 +3435,7 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
3433 3435
3434 spin_unlock_irqrestore(&adev->ddev->event_lock, flags); 3436 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3435 3437
3436 drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); 3438 drm_crtc_vblank_put(&amdgpu_crtc->base);
3437 schedule_work(&works->unpin_work); 3439 schedule_work(&works->unpin_work);
3438 3440
3439 return 0; 3441 return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 3fb65e41a6ef..300ff4aab0fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2574,19 +2574,21 @@ static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
2574 } 2574 }
2575} 2575}
2576 2576
2577static void dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 2577static int dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2578 u16 *blue, uint32_t start, uint32_t size) 2578 u16 *blue, uint32_t size)
2579{ 2579{
2580 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2580 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2581 int end = (start + size > 256) ? 256 : start + size, i; 2581 int i;
2582 2582
2583 /* userspace palettes are always correct as is */ 2583 /* userspace palettes are always correct as is */
2584 for (i = start; i < end; i++) { 2584 for (i = 0; i < size; i++) {
2585 amdgpu_crtc->lut_r[i] = red[i] >> 6; 2585 amdgpu_crtc->lut_r[i] = red[i] >> 6;
2586 amdgpu_crtc->lut_g[i] = green[i] >> 6; 2586 amdgpu_crtc->lut_g[i] = green[i] >> 6;
2587 amdgpu_crtc->lut_b[i] = blue[i] >> 6; 2587 amdgpu_crtc->lut_b[i] = blue[i] >> 6;
2588 } 2588 }
2589 dce_v8_0_crtc_load_lut(crtc); 2589 dce_v8_0_crtc_load_lut(crtc);
2590
2591 return 0;
2590} 2592}
2591 2593
2592static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc) 2594static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc)
@@ -2624,13 +2626,13 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2624 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); 2626 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
2625 amdgpu_irq_update(adev, &adev->crtc_irq, type); 2627 amdgpu_irq_update(adev, &adev->crtc_irq, type);
2626 amdgpu_irq_update(adev, &adev->pageflip_irq, type); 2628 amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2627 drm_vblank_on(dev, amdgpu_crtc->crtc_id); 2629 drm_crtc_vblank_on(crtc);
2628 dce_v8_0_crtc_load_lut(crtc); 2630 dce_v8_0_crtc_load_lut(crtc);
2629 break; 2631 break;
2630 case DRM_MODE_DPMS_STANDBY: 2632 case DRM_MODE_DPMS_STANDBY:
2631 case DRM_MODE_DPMS_SUSPEND: 2633 case DRM_MODE_DPMS_SUSPEND:
2632 case DRM_MODE_DPMS_OFF: 2634 case DRM_MODE_DPMS_OFF:
2633 drm_vblank_off(dev, amdgpu_crtc->crtc_id); 2635 drm_crtc_vblank_off(crtc);
2634 if (amdgpu_crtc->enabled) { 2636 if (amdgpu_crtc->enabled) {
2635 dce_v8_0_vga_enable(crtc, true); 2637 dce_v8_0_vga_enable(crtc, true);
2636 amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE); 2638 amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
@@ -3376,7 +3378,7 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
3376 3378
3377 spin_unlock_irqrestore(&adev->ddev->event_lock, flags); 3379 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3378 3380
3379 drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); 3381 drm_crtc_vblank_put(&amdgpu_crtc->base);
3380 schedule_work(&works->unpin_work); 3382 schedule_work(&works->unpin_work);
3381 3383
3382 return 0; 3384 return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
index 245cabf06575..ed03b75175d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
@@ -72,6 +72,11 @@ static int fiji_dpm_sw_init(void *handle)
72 72
73static int fiji_dpm_sw_fini(void *handle) 73static int fiji_dpm_sw_fini(void *handle)
74{ 74{
75 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
76
77 release_firmware(adev->pm.fw);
78 adev->pm.fw = NULL;
79
75 return 0; 80 return 0;
76} 81}
77 82
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 7f18a53ab53a..fc8ff4d3ccf8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -991,6 +991,22 @@ out:
991 return err; 991 return err;
992} 992}
993 993
994static void gfx_v7_0_free_microcode(struct amdgpu_device *adev)
995{
996 release_firmware(adev->gfx.pfp_fw);
997 adev->gfx.pfp_fw = NULL;
998 release_firmware(adev->gfx.me_fw);
999 adev->gfx.me_fw = NULL;
1000 release_firmware(adev->gfx.ce_fw);
1001 adev->gfx.ce_fw = NULL;
1002 release_firmware(adev->gfx.mec_fw);
1003 adev->gfx.mec_fw = NULL;
1004 release_firmware(adev->gfx.mec2_fw);
1005 adev->gfx.mec2_fw = NULL;
1006 release_firmware(adev->gfx.rlc_fw);
1007 adev->gfx.rlc_fw = NULL;
1008}
1009
994/** 1010/**
995 * gfx_v7_0_tiling_mode_table_init - init the hw tiling table 1011 * gfx_v7_0_tiling_mode_table_init - init the hw tiling table
996 * 1012 *
@@ -4489,6 +4505,7 @@ static int gfx_v7_0_sw_fini(void *handle)
4489 gfx_v7_0_cp_compute_fini(adev); 4505 gfx_v7_0_cp_compute_fini(adev);
4490 gfx_v7_0_rlc_fini(adev); 4506 gfx_v7_0_rlc_fini(adev);
4491 gfx_v7_0_mec_fini(adev); 4507 gfx_v7_0_mec_fini(adev);
4508 gfx_v7_0_free_microcode(adev);
4492 4509
4493 return 0; 4510 return 0;
4494} 4511}
@@ -4816,7 +4833,7 @@ static int gfx_v7_0_eop_irq(struct amdgpu_device *adev,
4816 case 2: 4833 case 2:
4817 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 4834 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4818 ring = &adev->gfx.compute_ring[i]; 4835 ring = &adev->gfx.compute_ring[i];
4819 if ((ring->me == me_id) & (ring->pipe == pipe_id)) 4836 if ((ring->me == me_id) && (ring->pipe == pipe_id))
4820 amdgpu_fence_process(ring); 4837 amdgpu_fence_process(ring);
4821 } 4838 }
4822 break; 4839 break;
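
Note: the eop_irq fix above swaps a bitwise AND for a logical one. Since each comparison already evaluates to 0 or 1 the result happens to be identical, but '&&' states the intent and short-circuits. A standalone illustration:

#include <stdio.h>

int main(void)
{
        int me_id = 1, pipe_id = 2;

        /* Each comparison yields 0 or 1, so '&' gives the same answer here... */
        int bitwise = (me_id == 1) & (pipe_id == 2);
        /* ...but '&&' is the logical operator and short-circuits on failure. */
        int logical = (me_id == 1) && (pipe_id == 2);

        printf("%d %d\n", bitwise, logical);    /* prints: 1 1 */
        return 0;
}
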
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index f19bab68fd83..1a5cbaff1e34 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -297,7 +297,8 @@ static const u32 polaris11_golden_common_all[] =
297static const u32 golden_settings_polaris10_a11[] = 297static const u32 golden_settings_polaris10_a11[] =
298{ 298{
299 mmATC_MISC_CG, 0x000c0fc0, 0x000c0200, 299 mmATC_MISC_CG, 0x000c0fc0, 0x000c0200,
300 mmCB_HW_CONTROL, 0xfffdf3cf, 0x00006208, 300 mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
301 mmCB_HW_CONTROL_2, 0, 0x0f000000,
301 mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040, 302 mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
302 mmDB_DEBUG2, 0xf00fffff, 0x00000400, 303 mmDB_DEBUG2, 0xf00fffff, 0x00000400,
303 mmPA_SC_ENHANCE, 0xffffffff, 0x20000001, 304 mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
@@ -836,6 +837,26 @@ err1:
836 return r; 837 return r;
837} 838}
838 839
840
841static void gfx_v8_0_free_microcode(struct amdgpu_device *adev) {
842 release_firmware(adev->gfx.pfp_fw);
843 adev->gfx.pfp_fw = NULL;
844 release_firmware(adev->gfx.me_fw);
845 adev->gfx.me_fw = NULL;
846 release_firmware(adev->gfx.ce_fw);
847 adev->gfx.ce_fw = NULL;
848 release_firmware(adev->gfx.rlc_fw);
849 adev->gfx.rlc_fw = NULL;
850 release_firmware(adev->gfx.mec_fw);
851 adev->gfx.mec_fw = NULL;
852 if ((adev->asic_type != CHIP_STONEY) &&
853 (adev->asic_type != CHIP_TOPAZ))
854 release_firmware(adev->gfx.mec2_fw);
855 adev->gfx.mec2_fw = NULL;
856
857 kfree(adev->gfx.rlc.register_list_format);
858}
859
839static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) 860static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
840{ 861{
841 const char *chip_name; 862 const char *chip_name;
@@ -1983,7 +2004,7 @@ static int gfx_v8_0_sw_fini(void *handle)
1983 2004
1984 gfx_v8_0_rlc_fini(adev); 2005 gfx_v8_0_rlc_fini(adev);
1985 2006
1986 kfree(adev->gfx.rlc.register_list_format); 2007 gfx_v8_0_free_microcode(adev);
1987 2008
1988 return 0; 2009 return 0;
1989} 2010}
@@ -3974,11 +3995,15 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
3974 amdgpu_ring_write(ring, 0x3a00161a); 3995 amdgpu_ring_write(ring, 0x3a00161a);
3975 amdgpu_ring_write(ring, 0x0000002e); 3996 amdgpu_ring_write(ring, 0x0000002e);
3976 break; 3997 break;
3977 case CHIP_TOPAZ:
3978 case CHIP_CARRIZO: 3998 case CHIP_CARRIZO:
3979 amdgpu_ring_write(ring, 0x00000002); 3999 amdgpu_ring_write(ring, 0x00000002);
3980 amdgpu_ring_write(ring, 0x00000000); 4000 amdgpu_ring_write(ring, 0x00000000);
3981 break; 4001 break;
4002 case CHIP_TOPAZ:
4003 amdgpu_ring_write(ring, adev->gfx.config.num_rbs == 1 ?
4004 0x00000000 : 0x00000002);
4005 amdgpu_ring_write(ring, 0x00000000);
4006 break;
3982 case CHIP_STONEY: 4007 case CHIP_STONEY:
3983 amdgpu_ring_write(ring, 0x00000000); 4008 amdgpu_ring_write(ring, 0x00000000);
3984 amdgpu_ring_write(ring, 0x00000000); 4009 amdgpu_ring_write(ring, 0x00000000);
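
Note: besides the new gfx_v8_0_free_microcode teardown, this file retunes the Polaris10 golden-settings table (mmCB_HW_CONTROL gains a bit, 0x00006208 -> 0x00007208, and an mmCB_HW_CONTROL_2 entry is added). Such tables are {register, mask, value} triplets that a helper read-modify-writes at init; a simplified sketch, assuming the mask selects the bits to replace (the driver's exact mask semantics may differ):

#include <linux/types.h>

/* Hypothetical MMIO accessors standing in for RREG32/WREG32. */
u32 my_rreg32(u32 reg);
void my_wreg32(u32 reg, u32 val);

struct reg_golden { u32 reg, and_mask, or_val; };

static void program_golden(const struct reg_golden *t, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                u32 tmp = my_rreg32(t[i].reg);

                tmp &= ~t[i].and_mask;  /* clear the bits being retuned */
                tmp |= t[i].or_val;     /* apply the golden value */
                my_wreg32(t[i].reg, tmp);
        }
}
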
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
index 460bc8ad37e6..825ccd63f2dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
@@ -72,6 +72,11 @@ static int iceland_dpm_sw_init(void *handle)
72 72
73static int iceland_dpm_sw_fini(void *handle) 73static int iceland_dpm_sw_fini(void *handle)
74{ 74{
75 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
76
77 release_firmware(adev->pm.fw);
78 adev->pm.fw = NULL;
79
75 return 0; 80 return 0;
76} 81}
77 82
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index f4c3130d3fdb..b556bd0a8797 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -105,6 +105,15 @@ static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev)
105 } 105 }
106} 106}
107 107
108static void sdma_v2_4_free_microcode(struct amdgpu_device *adev)
109{
110 int i;
111 for (i = 0; i < adev->sdma.num_instances; i++) {
112 release_firmware(adev->sdma.instance[i].fw);
113 adev->sdma.instance[i].fw = NULL;
114 }
115}
116
108/** 117/**
109 * sdma_v2_4_init_microcode - load ucode images from disk 118 * sdma_v2_4_init_microcode - load ucode images from disk
110 * 119 *
@@ -461,6 +470,8 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
461 /* Initialize the ring buffer's read and write pointers */ 470 /* Initialize the ring buffer's read and write pointers */
462 WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); 471 WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
463 WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); 472 WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
473 WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
474 WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
464 475
465 /* set the wb address whether it's enabled or not */ 476 /* set the wb address whether it's enabled or not */
466 WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], 477 WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
@@ -489,7 +500,11 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
489 WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); 500 WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
490 501
491 ring->ready = true; 502 ring->ready = true;
503 }
492 504
505 sdma_v2_4_enable(adev, true);
506 for (i = 0; i < adev->sdma.num_instances; i++) {
507 ring = &adev->sdma.instance[i].ring;
493 r = amdgpu_ring_test_ring(ring); 508 r = amdgpu_ring_test_ring(ring);
494 if (r) { 509 if (r) {
495 ring->ready = false; 510 ring->ready = false;
@@ -580,8 +595,8 @@ static int sdma_v2_4_start(struct amdgpu_device *adev)
580 return -EINVAL; 595 return -EINVAL;
581 } 596 }
582 597
583 /* unhalt the MEs */ 598 /* halt the engine before programming */
584 sdma_v2_4_enable(adev, true); 599 sdma_v2_4_enable(adev, false);
585 600
586 /* start the gfx rings and rlc compute queues */ 601 /* start the gfx rings and rlc compute queues */
587 r = sdma_v2_4_gfx_resume(adev); 602 r = sdma_v2_4_gfx_resume(adev);
@@ -1012,6 +1027,7 @@ static int sdma_v2_4_sw_fini(void *handle)
1012 for (i = 0; i < adev->sdma.num_instances; i++) 1027 for (i = 0; i < adev->sdma.num_instances; i++)
1013 amdgpu_ring_fini(&adev->sdma.instance[i].ring); 1028 amdgpu_ring_fini(&adev->sdma.instance[i].ring);
1014 1029
1030 sdma_v2_4_free_microcode(adev);
1015 return 0; 1031 return 0;
1016} 1032}
1017 1033
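
Note: the sdma_v2_4.c resume rework above changes the ordering: program every ring while the engine is halted, un-halt once, and only then run the ring tests (which need a live engine); the start path now halts before programming instead of un-halting. A sketch of the resulting flow with hypothetical helpers:

#include <linux/types.h>

struct my_dev;
void my_sdma_enable(struct my_dev *dev, bool enable);
void my_sdma_program_ring(struct my_dev *dev, int i);   /* RB/IB pointers, base */
int my_ring_test(struct my_dev *dev, int i);

static int my_sdma_resume(struct my_dev *dev, int num_instances)
{
        int i, r;

        my_sdma_enable(dev, false);             /* halt before touching RB registers */
        for (i = 0; i < num_instances; i++)
                my_sdma_program_ring(dev, i);   /* zero RPTR/WPTR and IB pointers */
        my_sdma_enable(dev, true);              /* un-halt once every ring is set up */

        for (i = 0; i < num_instances; i++) {
                r = my_ring_test(dev, i);       /* the test needs a running engine */
                if (r)
                        return r;
        }
        return 0;
}

The sdma_v3_0.c hunks below apply the same reordering, additionally deferring context-switch (ring preemption) enablement until after programming.
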
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 31d99b0010f7..532ea88da66a 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -236,6 +236,15 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
236 } 236 }
237} 237}
238 238
239static void sdma_v3_0_free_microcode(struct amdgpu_device *adev)
240{
241 int i;
242 for (i = 0; i < adev->sdma.num_instances; i++) {
243 release_firmware(adev->sdma.instance[i].fw);
244 adev->sdma.instance[i].fw = NULL;
245 }
246}
247
239/** 248/**
240 * sdma_v3_0_init_microcode - load ucode images from disk 249 * sdma_v3_0_init_microcode - load ucode images from disk
241 * 250 *
@@ -672,6 +681,8 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
672 /* Initialize the ring buffer's read and write pointers */ 681 /* Initialize the ring buffer's read and write pointers */
673 WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); 682 WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
674 WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); 683 WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
684 WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
685 WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
675 686
676 /* set the wb address whether it's enabled or not */ 687 /* set the wb address whether it's enabled or not */
677 WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], 688 WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
@@ -711,7 +722,15 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
711 WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); 722 WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
712 723
713 ring->ready = true; 724 ring->ready = true;
725 }
726
727 /* unhalt the MEs */
728 sdma_v3_0_enable(adev, true);
729 /* enable sdma ring preemption */
730 sdma_v3_0_ctx_switch_enable(adev, true);
714 731
732 for (i = 0; i < adev->sdma.num_instances; i++) {
733 ring = &adev->sdma.instance[i].ring;
715 r = amdgpu_ring_test_ring(ring); 734 r = amdgpu_ring_test_ring(ring);
716 if (r) { 735 if (r) {
717 ring->ready = false; 736 ring->ready = false;
@@ -804,10 +823,9 @@ static int sdma_v3_0_start(struct amdgpu_device *adev)
804 } 823 }
805 } 824 }
806 825
807 /* unhalt the MEs */ 826 /* disable sdma engine before programming it */
808 sdma_v3_0_enable(adev, true); 827 sdma_v3_0_ctx_switch_enable(adev, false);
809 /* enable sdma ring preemption */ 828 sdma_v3_0_enable(adev, false);
810 sdma_v3_0_ctx_switch_enable(adev, true);
811 829
812 /* start the gfx rings and rlc compute queues */ 830 /* start the gfx rings and rlc compute queues */
813 r = sdma_v3_0_gfx_resume(adev); 831 r = sdma_v3_0_gfx_resume(adev);
@@ -1247,6 +1265,7 @@ static int sdma_v3_0_sw_fini(void *handle)
1247 for (i = 0; i < adev->sdma.num_instances; i++) 1265 for (i = 0; i < adev->sdma.num_instances; i++)
1248 amdgpu_ring_fini(&adev->sdma.instance[i].ring); 1266 amdgpu_ring_fini(&adev->sdma.instance[i].ring);
1249 1267
1268 sdma_v3_0_free_microcode(adev);
1250 return 0; 1269 return 0;
1251} 1270}
1252 1271
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
index b7615cefcac4..f06f6f4dc3a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
@@ -71,6 +71,11 @@ static int tonga_dpm_sw_init(void *handle)
71 71
72static int tonga_dpm_sw_fini(void *handle) 72static int tonga_dpm_sw_fini(void *handle)
73{ 73{
74 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
75
76 release_firmware(adev->pm.fw);
77 adev->pm.fw = NULL;
78
74 return 0; 79 return 0;
75} 80}
76 81
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 2c88d0b66cf3..a65c96029476 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -421,6 +421,20 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
421 return true; 421 return true;
422} 422}
423 423
424static u32 vi_get_virtual_caps(struct amdgpu_device *adev)
425{
426 u32 caps = 0;
427 u32 reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
428
429 if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
430 caps |= AMDGPU_VIRT_CAPS_SRIOV_EN;
431
432 if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
433 caps |= AMDGPU_VIRT_CAPS_IS_VF;
434
435 return caps;
436}
437
424static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = { 438static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
425 {mmGB_MACROTILE_MODE7, true}, 439 {mmGB_MACROTILE_MODE7, true},
426}; 440};
@@ -1118,6 +1132,7 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
1118 .get_xclk = &vi_get_xclk, 1132 .get_xclk = &vi_get_xclk,
1119 .set_uvd_clocks = &vi_set_uvd_clocks, 1133 .set_uvd_clocks = &vi_set_uvd_clocks,
1120 .set_vce_clocks = &vi_set_vce_clocks, 1134 .set_vce_clocks = &vi_set_vce_clocks,
1135 .get_virtual_caps = &vi_get_virtual_caps,
1121 /* these should be moved to their own ip modules */ 1136 /* these should be moved to their own ip modules */
1122 .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter, 1137 .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
1123 .wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle, 1138 .wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle,
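
Note: vi_get_virtual_caps above derives SR-IOV capability flags from two fields of a single BIF register read; REG_GET_FIELD expands to a mask-and-shift on the named field. A generic sketch of that pattern with an entirely invented register layout:

#include <linux/types.h>

#define MY_IOV_ENABLE_MASK      0x00000001      /* invented field layout */
#define MY_FUNC_ID_MASK         0x0000fffe
#define MY_FUNC_ID_SHIFT        1

u32 my_rreg32(u32 reg);                         /* hypothetical MMIO read */

static u32 my_get_virtual_caps(u32 iov_reg_offset)
{
        u32 caps = 0;
        u32 reg = my_rreg32(iov_reg_offset);

        if (reg & MY_IOV_ENABLE_MASK)                   /* SR-IOV enabled */
                caps |= 0x1;                            /* e.g. SRIOV_EN */
        if ((reg & MY_FUNC_ID_MASK) >> MY_FUNC_ID_SHIFT)
                caps |= 0x2;                            /* nonzero id => VF */
        return caps;
}
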
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index ec4036a09f3e..a625b9137da2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -187,12 +187,12 @@ int init_pipelines(struct device_queue_manager *dqm,
187unsigned int get_first_pipe(struct device_queue_manager *dqm); 187unsigned int get_first_pipe(struct device_queue_manager *dqm);
188unsigned int get_pipes_num(struct device_queue_manager *dqm); 188unsigned int get_pipes_num(struct device_queue_manager *dqm);
189 189
190extern inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd) 190static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
191{ 191{
192 return (pdd->lds_base >> 16) & 0xFF; 192 return (pdd->lds_base >> 16) & 0xFF;
193} 193}
194 194
195extern inline unsigned int 195static inline unsigned int
196get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd) 196get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
197{ 197{
198 return (pdd->lds_base >> 60) & 0x0E; 198 return (pdd->lds_base >> 60) & 0x0E;
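
Note: the 'extern inline' to 'static inline' change above matters because extern inline in a header is compiler-mode dependent: under GNU89 inline semantics no standalone definition is ever emitted, while C99 inline expects exactly one out-of-line definition in some translation unit, so link failures appear or vanish with the -std flag. static inline sidesteps both by giving every includer its own copy, which is the kernel convention for small header helpers:

/* Header sketch mirroring get_sh_mem_bases_32; safe under any inline model. */
static inline unsigned int sh_bases_32(unsigned long lds_base)
{
        return (lds_base >> 16) & 0xFF; /* each TU gets a private copy */
}
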
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index d0d5f4baf72d..80113c335966 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -617,10 +617,7 @@ int kgd2kfd_resume(struct kfd_dev *kfd);
617int kfd_init_apertures(struct kfd_process *process); 617int kfd_init_apertures(struct kfd_process *process);
618 618
619/* Queue Context Management */ 619/* Queue Context Management */
620inline uint32_t lower_32(uint64_t x);
621inline uint32_t upper_32(uint64_t x);
622struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd); 620struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd);
623inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m);
624 621
625int init_queue(struct queue **q, struct queue_properties properties); 622int init_queue(struct queue **q, struct queue_properties properties);
626void uninit_queue(struct queue *q); 623void uninit_queue(struct queue *q);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index ac005796b71c..7708d90b9da9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -242,13 +242,19 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
242 pqm_uninit(&p->pqm); 242 pqm_uninit(&p->pqm);
243 243
244 /* Iterate over all process device data structure and check 244 /* Iterate over all process device data structure and check
245 * if we should reset all wavefronts */ 245 * if we should delete debug managers and reset all wavefronts
246 list_for_each_entry(pdd, &p->per_device_data, per_device_list) 246 */
247 list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
248 if ((pdd->dev->dbgmgr) &&
249 (pdd->dev->dbgmgr->pasid == p->pasid))
250 kfd_dbgmgr_destroy(pdd->dev->dbgmgr);
251
247 if (pdd->reset_wavefronts) { 252 if (pdd->reset_wavefronts) {
248 pr_warn("amdkfd: Resetting all wave fronts\n"); 253 pr_warn("amdkfd: Resetting all wave fronts\n");
249 dbgdev_wave_reset_wavefronts(pdd->dev, p); 254 dbgdev_wave_reset_wavefronts(pdd->dev, p);
250 pdd->reset_wavefronts = false; 255 pdd->reset_wavefronts = false;
251 } 256 }
257 }
252 258
253 mutex_unlock(&p->mutex); 259 mutex_unlock(&p->mutex);
254 260
@@ -404,42 +410,52 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
404 410
405 idx = srcu_read_lock(&kfd_processes_srcu); 411 idx = srcu_read_lock(&kfd_processes_srcu);
406 412
413 /*
414 * Look for the process that matches the pasid. If there is no such
415 * process, we either released it in amdkfd's own notifier, or there
416 * is a bug. Unfortunately, there is no way to tell...
417 */
407 hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes) 418 hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes)
408 if (p->pasid == pasid) 419 if (p->pasid == pasid) {
409 break;
410 420
411 srcu_read_unlock(&kfd_processes_srcu, idx); 421 srcu_read_unlock(&kfd_processes_srcu, idx);
412 422
413 BUG_ON(p->pasid != pasid); 423 pr_debug("Unbinding process %d from IOMMU\n", pasid);
414 424
415 mutex_lock(&p->mutex); 425 mutex_lock(&p->mutex);
416 426
417 if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid)) 427 if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
418 kfd_dbgmgr_destroy(dev->dbgmgr); 428 kfd_dbgmgr_destroy(dev->dbgmgr);
419 429
420 pqm_uninit(&p->pqm); 430 pqm_uninit(&p->pqm);
421 431
422 pdd = kfd_get_process_device_data(dev, p); 432 pdd = kfd_get_process_device_data(dev, p);
423 433
424 if (!pdd) { 434 if (!pdd) {
425 mutex_unlock(&p->mutex); 435 mutex_unlock(&p->mutex);
426 return; 436 return;
427 } 437 }
428 438
429 if (pdd->reset_wavefronts) { 439 if (pdd->reset_wavefronts) {
430 dbgdev_wave_reset_wavefronts(pdd->dev, p); 440 dbgdev_wave_reset_wavefronts(pdd->dev, p);
431 pdd->reset_wavefronts = false; 441 pdd->reset_wavefronts = false;
432 } 442 }
433 443
434 /* 444 /*
435 * Just mark pdd as unbound, because we still need it to call 445 * Just mark pdd as unbound, because we still need it
436 * amd_iommu_unbind_pasid() when the process exits. 446 * to call amd_iommu_unbind_pasid() when the
437 * We don't call amd_iommu_unbind_pasid() here 447 * process exits.
438 * because the IOMMU called us. 448 * We don't call amd_iommu_unbind_pasid() here
439 */ 449 * because the IOMMU called us.
440 pdd->bound = false; 450 */
451 pdd->bound = false;
441 452
442 mutex_unlock(&p->mutex); 453 mutex_unlock(&p->mutex);
454
455 return;
456 }
457
458 srcu_read_unlock(&kfd_processes_srcu, idx);
443} 459}
444 460
445struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p) 461struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p)
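
Note: the kfd_process.c rework above replaces "search, then BUG_ON if missing" with doing the unbind work inside the match arm of the SRCU-protected walk, so a process that was already released in the notifier is handled gracefully. The shape of that pattern, with stand-in types and a hypothetical teardown helper:

#include <linux/hashtable.h>
#include <linux/srcu.h>

static DEFINE_HASHTABLE(my_table, 7);   /* hypothetical process table */
DEFINE_SRCU(my_srcu);

struct my_process {
        unsigned int pasid;
        struct hlist_node node;
};

void handle_unbind(struct my_process *p);       /* stand-in for the teardown */

static void my_unbind_by_pasid(unsigned int pasid)
{
        struct my_process *p;
        int i, idx;

        idx = srcu_read_lock(&my_srcu);
        hash_for_each_rcu(my_table, i, p, node)
                if (p->pasid == pasid) {
                        srcu_read_unlock(&my_srcu, idx);
                        handle_unbind(p);       /* found: run the teardown */
                        return;
                }
        srcu_read_unlock(&my_srcu, idx);        /* no match: already released */
}
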
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 74909e72a009..884c96f50c3d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -666,7 +666,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
666 dev->node_props.simd_count); 666 dev->node_props.simd_count);
667 667
668 if (dev->mem_bank_count < dev->node_props.mem_banks_count) { 668 if (dev->mem_bank_count < dev->node_props.mem_banks_count) {
669 pr_warn("kfd: mem_banks_count truncated from %d to %d\n", 669 pr_info_once("kfd: mem_banks_count truncated from %d to %d\n",
670 dev->node_props.mem_banks_count, 670 dev->node_props.mem_banks_count,
671 dev->mem_bank_count); 671 dev->mem_bank_count);
672 sysfs_show_32bit_prop(buffer, "mem_banks_count", 672 sysfs_show_32bit_prop(buffer, "mem_banks_count",
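
Note: pr_warn to pr_info_once above is worth flagging because node_show() runs on every sysfs read, so the old message could repeat indefinitely; the *_once printk variants latch after the first hit. A small sketch with hypothetical arguments:

#include <linux/printk.h>

/* A show() handler can fire on every read of the sysfs file; log the
 * truncation once instead of each time. */
static void report_banks(int reported, int actual)
{
        if (actual < reported)
                pr_info_once("mem_banks_count truncated from %d to %d\n",
                             reported, actual);
}
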
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 6080951d539d..afce1edbe250 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -157,6 +157,7 @@ struct amd_ip_funcs {
157 int (*hw_init)(void *handle); 157 int (*hw_init)(void *handle);
158 /* tears down the hw state */ 158 /* tears down the hw state */
159 int (*hw_fini)(void *handle); 159 int (*hw_fini)(void *handle);
160 void (*late_fini)(void *handle);
160 /* handles IP specific hw/sw changes for suspend */ 161 /* handles IP specific hw/sw changes for suspend */
161 int (*suspend)(void *handle); 162 int (*suspend)(void *handle);
162 /* handles IP specific hw/sw changes for resume */ 163 /* handles IP specific hw/sw changes for resume */
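
Note: the new optional late_fini hook in amd_ip_funcs gives an IP block a teardown stage after the normal fini pass, presumably consumed elsewhere in this series. A sketch of how core code might drive such a hook (invented structures; amdgpu's real dispatch lives in its device code):

struct my_ip_funcs {
        int (*hw_fini)(void *handle);
        void (*late_fini)(void *handle);        /* optional, may be NULL */
};

struct my_ip_block {
        const struct my_ip_funcs *funcs;
        void *handle;
};

static void my_device_fini(struct my_ip_block *blocks, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                if (blocks[i].funcs->hw_fini)
                        blocks[i].funcs->hw_fini(blocks[i].handle);
                if (blocks[i].funcs->late_fini) /* the new optional stage */
                        blocks[i].funcs->late_fini(blocks[i].handle);
        }
}
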
diff --git a/drivers/gpu/drm/amd/include/atombios.h b/drivers/gpu/drm/amd/include/atombios.h
index 32f3e345de08..3493da5c8f0e 100644
--- a/drivers/gpu/drm/amd/include/atombios.h
+++ b/drivers/gpu/drm/amd/include/atombios.h
@@ -5538,6 +5538,78 @@ typedef struct _ATOM_ASIC_PROFILING_INFO_V3_5
5538 ULONG ulReserved[12]; 5538 ULONG ulReserved[12];
5539}ATOM_ASIC_PROFILING_INFO_V3_5; 5539}ATOM_ASIC_PROFILING_INFO_V3_5;
5540 5540
5541/* for Polaris10/11 AVFS parameters */
5542typedef struct _ATOM_ASIC_PROFILING_INFO_V3_6
5543{
5544 ATOM_COMMON_TABLE_HEADER asHeader;
5545 ULONG ulMaxVddc;
5546 ULONG ulMinVddc;
5547 USHORT usLkgEuseIndex;
5548 UCHAR ucLkgEfuseBitLSB;
5549 UCHAR ucLkgEfuseLength;
5550 ULONG ulLkgEncodeLn_MaxDivMin;
5551 ULONG ulLkgEncodeMax;
5552 ULONG ulLkgEncodeMin;
5553 EFUSE_LINEAR_FUNC_PARAM sRoFuse;
5554 ULONG ulEvvDefaultVddc;
5555 ULONG ulEvvNoCalcVddc;
5556 ULONG ulSpeed_Model;
5557 ULONG ulSM_A0;
5558 ULONG ulSM_A1;
5559 ULONG ulSM_A2;
5560 ULONG ulSM_A3;
5561 ULONG ulSM_A4;
5562 ULONG ulSM_A5;
5563 ULONG ulSM_A6;
5564 ULONG ulSM_A7;
5565 UCHAR ucSM_A0_sign;
5566 UCHAR ucSM_A1_sign;
5567 UCHAR ucSM_A2_sign;
5568 UCHAR ucSM_A3_sign;
5569 UCHAR ucSM_A4_sign;
5570 UCHAR ucSM_A5_sign;
5571 UCHAR ucSM_A6_sign;
5572 UCHAR ucSM_A7_sign;
5573 ULONG ulMargin_RO_a;
5574 ULONG ulMargin_RO_b;
5575 ULONG ulMargin_RO_c;
5576 ULONG ulMargin_fixed;
5577 ULONG ulMargin_Fmax_mean;
5578 ULONG ulMargin_plat_mean;
5579 ULONG ulMargin_Fmax_sigma;
5580 ULONG ulMargin_plat_sigma;
5581 ULONG ulMargin_DC_sigma;
5582 ULONG ulLoadLineSlop;
5583 ULONG ulaTDClimitPerDPM[8];
5584 ULONG ulaNoCalcVddcPerDPM[8];
5585 ULONG ulAVFS_meanNsigma_Acontant0;
5586 ULONG ulAVFS_meanNsigma_Acontant1;
5587 ULONG ulAVFS_meanNsigma_Acontant2;
5588 USHORT usAVFS_meanNsigma_DC_tol_sigma;
5589 USHORT usAVFS_meanNsigma_Platform_mean;
5590 USHORT usAVFS_meanNsigma_Platform_sigma;
5591 ULONG ulGB_VDROOP_TABLE_CKSOFF_a0;
5592 ULONG ulGB_VDROOP_TABLE_CKSOFF_a1;
5593 ULONG ulGB_VDROOP_TABLE_CKSOFF_a2;
5594 ULONG ulGB_VDROOP_TABLE_CKSON_a0;
5595 ULONG ulGB_VDROOP_TABLE_CKSON_a1;
5596 ULONG ulGB_VDROOP_TABLE_CKSON_a2;
5597 ULONG ulAVFSGB_FUSE_TABLE_CKSOFF_m1;
5598 USHORT usAVFSGB_FUSE_TABLE_CKSOFF_m2;
5599 ULONG ulAVFSGB_FUSE_TABLE_CKSOFF_b;
5600 ULONG ulAVFSGB_FUSE_TABLE_CKSON_m1;
5601 USHORT usAVFSGB_FUSE_TABLE_CKSON_m2;
5602 ULONG ulAVFSGB_FUSE_TABLE_CKSON_b;
5603 USHORT usMaxVoltage_0_25mv;
5604 UCHAR ucEnableGB_VDROOP_TABLE_CKSOFF;
5605 UCHAR ucEnableGB_VDROOP_TABLE_CKSON;
5606 UCHAR ucEnableGB_FUSE_TABLE_CKSOFF;
5607 UCHAR ucEnableGB_FUSE_TABLE_CKSON;
5608 USHORT usPSM_Age_ComFactor;
5609 UCHAR ucEnableApplyAVFS_CKS_OFF_Voltage;
5610 UCHAR ucReserved;
5611}ATOM_ASIC_PROFILING_INFO_V3_6;
5612
5541 5613
5542typedef struct _ATOM_SCLK_FCW_RANGE_ENTRY_V1{ 5614typedef struct _ATOM_SCLK_FCW_RANGE_ENTRY_V1{
5543 ULONG ulMaxSclkFreq; 5615 ULONG ulMaxSclkFreq;
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index a461e155a160..7464daf89ca1 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -581,6 +581,9 @@ typedef int (*cgs_get_firmware_info)(struct cgs_device *cgs_device,
581 enum cgs_ucode_id type, 581 enum cgs_ucode_id type,
582 struct cgs_firmware_info *info); 582 struct cgs_firmware_info *info);
583 583
584typedef int (*cgs_rel_firmware)(struct cgs_device *cgs_device,
585 enum cgs_ucode_id type);
586
584typedef int(*cgs_set_powergating_state)(struct cgs_device *cgs_device, 587typedef int(*cgs_set_powergating_state)(struct cgs_device *cgs_device,
585 enum amd_ip_block_type block_type, 588 enum amd_ip_block_type block_type,
586 enum amd_powergating_state state); 589 enum amd_powergating_state state);
@@ -645,6 +648,7 @@ struct cgs_ops {
645 cgs_set_camera_voltages_t set_camera_voltages; 648 cgs_set_camera_voltages_t set_camera_voltages;
646 /* Firmware Info */ 649 /* Firmware Info */
647 cgs_get_firmware_info get_firmware_info; 650 cgs_get_firmware_info get_firmware_info;
651 cgs_rel_firmware rel_firmware;
648 /* cg pg interface*/ 652 /* cg pg interface*/
649 cgs_set_powergating_state set_powergating_state; 653 cgs_set_powergating_state set_powergating_state;
650 cgs_set_clockgating_state set_clockgating_state; 654 cgs_set_clockgating_state set_clockgating_state;
@@ -738,6 +742,8 @@ struct cgs_device
738 CGS_CALL(set_camera_voltages,dev,mask,voltages) 742 CGS_CALL(set_camera_voltages,dev,mask,voltages)
739#define cgs_get_firmware_info(dev, type, info) \ 743#define cgs_get_firmware_info(dev, type, info) \
740 CGS_CALL(get_firmware_info, dev, type, info) 744 CGS_CALL(get_firmware_info, dev, type, info)
745#define cgs_rel_firmware(dev, type) \
746 CGS_CALL(rel_firmware, dev, type)
741#define cgs_set_powergating_state(dev, block_type, state) \ 747#define cgs_set_powergating_state(dev, block_type, state) \
742 CGS_CALL(set_powergating_state, dev, block_type, state) 748 CGS_CALL(set_powergating_state, dev, block_type, state)
743#define cgs_set_clockgating_state(dev, block_type, state) \ 749#define cgs_set_clockgating_state(dev, block_type, state) \
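
Note: cgs_rel_firmware above follows the established CGS pattern exactly: a typedef for the callback, a slot in struct cgs_ops, and a CGS_CALL-style wrapper macro so callers dispatch through the device's ops table. The idiom reduced to essentials, with invented names (the NULL check is this sketch's choice, not necessarily CGS's):

#include <linux/errno.h>

struct my_device;

struct my_ops {
        int (*rel_firmware)(struct my_device *dev, int type);
};

struct my_device {
        const struct my_ops *ops;
};

/* Dispatch through the ops table, tolerating an unimplemented slot. */
#define MY_CALL(func, dev, ...) \
        (((dev)->ops->func) ? (dev)->ops->func(dev, ##__VA_ARGS__) : -EINVAL)

#define my_rel_firmware(dev, type) MY_CALL(rel_firmware, dev, type)
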
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 8e345bfddb69..e629f8a9fe93 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -73,11 +73,14 @@ static int pp_sw_init(void *handle)
73 73
74 ret = hwmgr->hwmgr_func->backend_init(hwmgr); 74 ret = hwmgr->hwmgr_func->backend_init(hwmgr);
75 if (ret) 75 if (ret)
76 goto err; 76 goto err1;
77 77
78 pr_info("amdgpu: powerplay initialized\n"); 78 pr_info("amdgpu: powerplay initialized\n");
79 79
80 return 0; 80 return 0;
81err1:
82 if (hwmgr->pptable_func->pptable_fini)
83 hwmgr->pptable_func->pptable_fini(hwmgr);
81err: 84err:
82 pr_err("amdgpu: powerplay initialization failed\n"); 85 pr_err("amdgpu: powerplay initialization failed\n");
83 return ret; 86 return ret;
@@ -100,6 +103,9 @@ static int pp_sw_fini(void *handle)
100 if (hwmgr->hwmgr_func->backend_fini != NULL) 103 if (hwmgr->hwmgr_func->backend_fini != NULL)
101 ret = hwmgr->hwmgr_func->backend_fini(hwmgr); 104 ret = hwmgr->hwmgr_func->backend_fini(hwmgr);
102 105
106 if (hwmgr->pptable_func->pptable_fini)
107 hwmgr->pptable_func->pptable_fini(hwmgr);
108
103 return ret; 109 return ret;
104} 110}
105 111
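
Note: the pp_sw_init fix above adds a second label so a backend_init failure unwinds the pptable that was set up just before it, and pp_sw_fini gains the matching pptable_fini. This is the standard goto-unwind shape, where labels release resources in reverse order of acquisition; a sketch with hypothetical stages:

#include <linux/printk.h>

struct my_ctx;
int my_pptable_init(struct my_ctx *ctx);        /* hypothetical stages */
void my_pptable_fini(struct my_ctx *ctx);
int my_backend_init(struct my_ctx *ctx);

static int my_sw_init(struct my_ctx *ctx)
{
        int ret;

        ret = my_pptable_init(ctx);
        if (ret)
                goto err;               /* nothing to unwind yet */

        ret = my_backend_init(ctx);
        if (ret)
                goto err_pptable;       /* undo only what already succeeded */

        return 0;

err_pptable:
        my_pptable_fini(ctx);
err:
        pr_err("init failed\n");
        return ret;
}
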
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
index 46410e3c7349..fb88e4e5d625 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
@@ -58,9 +58,6 @@ static void pem_fini(struct pp_eventmgr *eventmgr)
58 pem_unregister_interrupts(eventmgr); 58 pem_unregister_interrupts(eventmgr);
59 59
60 pem_handle_event(eventmgr, AMD_PP_EVENT_UNINITIALIZE, &event_data); 60 pem_handle_event(eventmgr, AMD_PP_EVENT_UNINITIALIZE, &event_data);
61
62 if (eventmgr != NULL)
63 kfree(eventmgr);
64} 61}
65 62
66int eventmgr_init(struct pp_instance *handle) 63int eventmgr_init(struct pp_instance *handle)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
index 24a16e49b571..92912ab20944 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
@@ -633,6 +633,8 @@ static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
633 data->vddci_control = FIJI_VOLTAGE_CONTROL_NONE; 633 data->vddci_control = FIJI_VOLTAGE_CONTROL_NONE;
634 data->mvdd_control = FIJI_VOLTAGE_CONTROL_NONE; 634 data->mvdd_control = FIJI_VOLTAGE_CONTROL_NONE;
635 635
636 data->force_pcie_gen = PP_PCIEGenInvalid;
637
636 if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, 638 if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
637 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) 639 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
638 data->voltage_control = FIJI_VOLTAGE_CONTROL_BY_SVID2; 640 data->voltage_control = FIJI_VOLTAGE_CONTROL_BY_SVID2;
@@ -1830,7 +1832,7 @@ static uint16_t fiji_find_closest_vddci(struct pp_hwmgr *hwmgr, uint16_t vddci)
1830 1832
1831 PP_ASSERT_WITH_CODE(false, 1833 PP_ASSERT_WITH_CODE(false,
1832 "VDDCI is larger than max VDDCI in VDDCI Voltage Table!", 1834 "VDDCI is larger than max VDDCI in VDDCI Voltage Table!",
1833 return vddci_table->entries[i].value); 1835 return vddci_table->entries[i-1].value);
1834} 1836}
1835 1837
1836static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, 1838static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
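
Note: the fiji_find_closest_vddci fix above (repeated for phm_find_closest_vddci in hwmgr.c below) is a textbook off-by-one: when the search loop runs off the end of the table, i equals the entry count, so entries[i] reads one past the array; entries[i-1] clamps to the largest value instead. The same logic in isolation, independent of the driver's voltage tables:

#include <linux/types.h>

/* Return the first table value >= want, clamping to the last entry.
 * Assumes count >= 1. */
static u16 find_closest(const u16 *table, int count, u16 want)
{
        int i;

        for (i = 0; i < count; i++)
                if (table[i] >= want)
                        return table[i];

        /* Fell off the end: i == count, so table[i] is out of bounds.
         * table[i - 1] is the largest (last) entry. */
        return table[i - 1];
}
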
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index fa208ada6892..efb77eda7508 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -306,10 +306,14 @@ int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
306{ 306{
307 PHM_FUNC_CHECK(hwmgr); 307 PHM_FUNC_CHECK(hwmgr);
308 308
309 if (hwmgr->hwmgr_func->store_cc6_data == NULL) 309 if (display_config == NULL)
310 return -EINVAL; 310 return -EINVAL;
311 311
312 hwmgr->display_config = *display_config; 312 hwmgr->display_config = *display_config;
313
314 if (hwmgr->hwmgr_func->store_cc6_data == NULL)
315 return -EINVAL;
316
313 /* to do pass other display configuration in future */ 317 /* to do pass other display configuration in future */
314 318
315 if (hwmgr->hwmgr_func->store_cc6_data) 319 if (hwmgr->hwmgr_func->store_cc6_data)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 1c48917da3cf..20f20e075588 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -93,6 +93,13 @@ int hwmgr_fini(struct pp_hwmgr *hwmgr)
93 if (hwmgr == NULL || hwmgr->ps == NULL) 93 if (hwmgr == NULL || hwmgr->ps == NULL)
94 return -EINVAL; 94 return -EINVAL;
95 95
96 /* do hwmgr finish*/
97 kfree(hwmgr->backend);
98
99 kfree(hwmgr->start_thermal_controller.function_list);
100
101 kfree(hwmgr->set_temperature_range.function_list);
102
96 kfree(hwmgr->ps); 103 kfree(hwmgr->ps);
97 kfree(hwmgr); 104 kfree(hwmgr);
98 return 0; 105 return 0;
@@ -462,7 +469,7 @@ uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, u
462 469
463 PP_ASSERT_WITH_CODE(false, 470 PP_ASSERT_WITH_CODE(false,
464 "VDDCI is larger than max VDDCI in VDDCI Voltage Table!", 471 "VDDCI is larger than max VDDCI in VDDCI Voltage Table!",
465 return vddci_table->entries[i].value); 472 return vddci_table->entries[i-1].value);
466} 473}
467 474
468int phm_find_boot_level(void *table, 475int phm_find_boot_level(void *table,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h
index 347fef127ce9..2930a3355948 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h
@@ -39,6 +39,7 @@ struct phm_ppt_v1_clock_voltage_dependency_record {
39 uint8_t phases; 39 uint8_t phases;
40 uint8_t cks_enable; 40 uint8_t cks_enable;
41 uint8_t cks_voffset; 41 uint8_t cks_voffset;
42 uint32_t sclk_offset;
42}; 43};
43 44
44typedef struct phm_ppt_v1_clock_voltage_dependency_record phm_ppt_v1_clock_voltage_dependency_record; 45typedef struct phm_ppt_v1_clock_voltage_dependency_record phm_ppt_v1_clock_voltage_dependency_record;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
index aa6be033f21b..64ee78f7d41e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
@@ -999,7 +999,7 @@ static int polaris10_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
999 vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), 999 vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
1000 (dep_table->entries[i].vddc - 1000 (dep_table->entries[i].vddc -
1001 (uint16_t)data->vddc_vddci_delta)); 1001 (uint16_t)data->vddc_vddci_delta));
1002 *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; 1002 *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
1003 } 1003 }
1004 1004
1005 if (POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control) 1005 if (POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control)
@@ -1296,7 +1296,6 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
1296 } 1296 }
1297 1297
1298 mem_level->MclkFrequency = clock; 1298 mem_level->MclkFrequency = clock;
1299 mem_level->StutterEnable = 0;
1300 mem_level->EnabledForThrottle = 1; 1299 mem_level->EnabledForThrottle = 1;
1301 mem_level->EnabledForActivity = 0; 1300 mem_level->EnabledForActivity = 0;
1302 mem_level->UpHyst = 0; 1301 mem_level->UpHyst = 0;
@@ -1304,7 +1303,6 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
1304 mem_level->VoltageDownHyst = 0; 1303 mem_level->VoltageDownHyst = 0;
1305 mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target; 1304 mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
1306 mem_level->StutterEnable = false; 1305 mem_level->StutterEnable = false;
1307
1308 mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; 1306 mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
1309 1307
1310 data->display_timing.num_existing_displays = info.display_count; 1308 data->display_timing.num_existing_displays = info.display_count;
@@ -1363,7 +1361,7 @@ static int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
1363 * a higher state by default such that we are not affected by 1361 * a higher state by default such that we are not affected by
1364 * the up threshold or MCLK DPM latency. 1362 * the up threshold or MCLK DPM latency.
1365 */ 1363 */
1366 levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target; 1364 levels[0].ActivityLevel = 0x1f;
1367 CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel); 1365 CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
1368 1366
1369 data->smc_state_table.MemoryDpmLevelCount = 1367 data->smc_state_table.MemoryDpmLevelCount =
@@ -1761,12 +1759,9 @@ static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
1761 1759
1762static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) 1760static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
1763{ 1761{
1764 uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks, 1762 uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
1765 volt_with_cks, value;
1766 uint16_t clock_freq_u16;
1767 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); 1763 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
1768 uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2, 1764 uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
1769 volt_offset = 0;
1770 struct phm_ppt_v1_information *table_info = 1765 struct phm_ppt_v1_information *table_info =
1771 (struct phm_ppt_v1_information *)(hwmgr->pptable); 1766 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1772 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = 1767 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
@@ -1778,50 +1773,38 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
1778 * if the part is SS or FF. if RO >= 1660MHz, part is FF. 1773 * if the part is SS or FF. if RO >= 1660MHz, part is FF.
1779 */ 1774 */
1780 efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, 1775 efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1781 ixSMU_EFUSE_0 + (146 * 4)); 1776 ixSMU_EFUSE_0 + (67 * 4));
1782 efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1783 ixSMU_EFUSE_0 + (148 * 4));
1784 efuse &= 0xFF000000; 1777 efuse &= 0xFF000000;
1785 efuse = efuse >> 24; 1778 efuse = efuse >> 24;
1786 efuse2 &= 0xF;
1787
1788 if (efuse2 == 1)
1789 ro = (2300 - 1350) * efuse / 255 + 1350;
1790 else
1791 ro = (2500 - 1000) * efuse / 255 + 1000;
1792 1779
1793 if (ro >= 1660) 1780 if (hwmgr->chip_id == CHIP_POLARIS10) {
1794 type = 0; 1781 min = 1000;
1795 else 1782 max = 2300;
1796 type = 1; 1783 } else {
1784 min = 1100;
1785 max = 2100;
1786 }
1797 1787
1798 /* Populate Stretch amount */ 1788 ro = efuse * (max -min)/255 + min;
1799 data->smc_state_table.ClockStretcherAmount = stretch_amount;
1800 1789
1801 /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */ 1790 /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
1802 for (i = 0; i < sclk_table->count; i++) { 1791 for (i = 0; i < sclk_table->count; i++) {
1803 data->smc_state_table.Sclk_CKS_masterEn0_7 |= 1792 data->smc_state_table.Sclk_CKS_masterEn0_7 |=
1804 sclk_table->entries[i].cks_enable << i; 1793 sclk_table->entries[i].cks_enable << i;
1805 volt_without_cks = (uint32_t)((14041 * 1794
1806 (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 / 1795 volt_without_cks = (uint32_t)(((ro - 40) * 1000 - 2753594 - sclk_table->entries[i].clk/100 * 136418 /1000) / \
1807 (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000))); 1796 (sclk_table->entries[i].clk/100 * 1132925 /10000 - 242418)/100);
1808 volt_with_cks = (uint32_t)((13946 * 1797
1809 (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 / 1798 volt_with_cks = (uint32_t)((ro * 1000 -2396351 - sclk_table->entries[i].clk/100 * 329021/1000) / \
1810 (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000))); 1799 (sclk_table->entries[i].clk/10000 * 649434 /1000 - 18005)/10);
1800
1811 if (volt_without_cks >= volt_with_cks) 1801 if (volt_without_cks >= volt_with_cks)
1812 volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks + 1802 volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
1813 sclk_table->entries[i].cks_voffset) * 100 / 625) + 1); 1803 sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
1804
1814 data->smc_state_table.Sclk_voltageOffset[i] = volt_offset; 1805 data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
1815 } 1806 }
1816 1807
1817 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
1818 STRETCH_ENABLE, 0x0);
1819 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
1820 masterReset, 0x1);
1821 /* PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, staticEnable, 0x1); */
1822 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
1823 masterReset, 0x0);
1824
1825 /* Populate CKS Lookup Table */ 1808 /* Populate CKS Lookup Table */
1826 if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5) 1809 if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
1827 stretch_amount2 = 0; 1810 stretch_amount2 = 0;
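
Note: the clock-stretcher rework above replaces the two-fuse decode with a single efuse byte mapped linearly onto a per-ASIC ring-oscillator range (1000-2300 for Polaris10, 1100-2100 otherwise). The mapping is plain interpolation; for example efuse = 128 with min = 1000 and max = 2300 gives 128 * 1300 / 255 + 1000 = 1652:

#include <linux/types.h>

/* Linear decode of an 8-bit fuse value onto [min, max]. */
static u32 decode_ro(u32 efuse, u32 min, u32 max)
{
        return efuse * (max - min) / 255 + min;
}
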
@@ -1835,69 +1818,6 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
1835 return -EINVAL); 1818 return -EINVAL);
1836 } 1819 }
1837 1820
1838 value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1839 ixPWR_CKS_CNTL);
1840 value &= 0xFFC2FF87;
1841 data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
1842 polaris10_clock_stretcher_lookup_table[stretch_amount2][0];
1843 data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
1844 polaris10_clock_stretcher_lookup_table[stretch_amount2][1];
1845 clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(data->smc_state_table.
1846 GraphicsLevel[data->smc_state_table.GraphicsDpmLevelCount - 1].SclkSetting.SclkFrequency) / 100);
1847 if (polaris10_clock_stretcher_lookup_table[stretch_amount2][0] < clock_freq_u16
1848 && polaris10_clock_stretcher_lookup_table[stretch_amount2][1] > clock_freq_u16) {
1849 /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
1850 value |= (polaris10_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
1851 /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
1852 value |= (polaris10_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
1853 /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
1854 value |= (polaris10_clock_stretch_amount_conversion
1855 [polaris10_clock_stretcher_lookup_table[stretch_amount2][3]]
1856 [stretch_amount]) << 3;
1857 }
1858 CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq);
1859 CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq);
1860 data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
1861 polaris10_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
1862 data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
1863 (polaris10_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
1864
1865 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1866 ixPWR_CKS_CNTL, value);
1867
1868 /* Populate DDT Lookup Table */
1869 for (i = 0; i < 4; i++) {
1870 /* Assign the minimum and maximum VID stored
1871 * in the last row of Clock Stretcher Voltage Table.
1872 */
1873 data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].minVID =
1874 (uint8_t) polaris10_clock_stretcher_ddt_table[type][i][2];
1875 data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].maxVID =
1876 (uint8_t) polaris10_clock_stretcher_ddt_table[type][i][3];
1877 /* Loop through each SCLK and check the frequency
1878 * to see if it lies within the frequency for clock stretcher.
1879 */
1880 for (j = 0; j < data->smc_state_table.GraphicsDpmLevelCount; j++) {
1881 cks_setting = 0;
1882 clock_freq = PP_SMC_TO_HOST_UL(
1883 data->smc_state_table.GraphicsLevel[j].SclkSetting.SclkFrequency);
1884 /* Check the allowed frequency against the sclk level[j].
1885 * Sclk's endianness has already been converted,
1886 * and it's in 10Khz unit,
1887 * as opposed to Data table, which is in Mhz unit.
1888 */
1889 if (clock_freq >= (polaris10_clock_stretcher_ddt_table[type][i][0]) * 100) {
1890 cks_setting |= 0x2;
1891 if (clock_freq < (polaris10_clock_stretcher_ddt_table[type][i][1]) * 100)
1892 cks_setting |= 0x1;
1893 }
1894 data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].setting
1895 |= cks_setting << (j * 2);
1896 }
1897 CONVERT_FROM_HOST_TO_SMC_US(
1898 data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].setting);
1899 }
1900
1901 value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL); 1821 value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
1902 value &= 0xFFFFFFFE; 1822 value &= 0xFFFFFFFE;
1903 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value); 1823 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
@@ -1956,6 +1876,90 @@ static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr,
1956 return 0; 1876 return 0;
1957} 1877}
1958 1878
1879
1880int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
1881{
1882 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
1883 SMU74_Discrete_DpmTable *table = &(data->smc_state_table);
1884 int result = 0;
1885 struct pp_atom_ctrl__avfs_parameters avfs_params = {0};
1886 AVFS_meanNsigma_t AVFS_meanNsigma = { {0} };
1887 AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} };
1888 uint32_t tmp, i;
1889 struct pp_smumgr *smumgr = hwmgr->smumgr;
1890 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
1891
1892 struct phm_ppt_v1_information *table_info =
1893 (struct phm_ppt_v1_information *)hwmgr->pptable;
1894 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
1895 table_info->vdd_dep_on_sclk;
1896
1897
1898 if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
1899 return result;
1900
1901 result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
1902
1903 if (0 == result) {
1904 table->BTCGB_VDROOP_TABLE[0].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
1905 table->BTCGB_VDROOP_TABLE[0].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
1906 table->BTCGB_VDROOP_TABLE[0].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
1907 table->BTCGB_VDROOP_TABLE[1].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0);
1908 table->BTCGB_VDROOP_TABLE[1].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1);
1909 table->BTCGB_VDROOP_TABLE[1].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2);
1910 table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1);
1911 table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2);
1912 table->AVFSGB_VDROOP_TABLE[0].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b);
1913 table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24;
1914 table->AVFSGB_VDROOP_TABLE[0].m2_shift = 12;
1915 table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
1916 table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2);
1917 table->AVFSGB_VDROOP_TABLE[1].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b);
1918 table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24;
1919 table->AVFSGB_VDROOP_TABLE[1].m2_shift = 12;
1920 table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv);
1921 AVFS_meanNsigma.Aconstant[0] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0);
1922 AVFS_meanNsigma.Aconstant[1] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1);
1923 AVFS_meanNsigma.Aconstant[2] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2);
1924 AVFS_meanNsigma.DC_tol_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma);
1925 AVFS_meanNsigma.Platform_mean = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean);
1926 AVFS_meanNsigma.PSM_Age_CompFactor = PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor);
1927 AVFS_meanNsigma.Platform_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma);
1928
1929 for (i = 0; i < NUM_VFT_COLUMNS; i++) {
1930 AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625);
1931 AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100);
1932 }
1933
1934 result = polaris10_read_smc_sram_dword(smumgr,
1935 SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma),
1936 &tmp, data->sram_end);
1937
1938 polaris10_copy_bytes_to_smc(smumgr,
1939 tmp,
1940 (uint8_t *)&AVFS_meanNsigma,
1941 sizeof(AVFS_meanNsigma_t),
1942 data->sram_end);
1943
1944 result = polaris10_read_smc_sram_dword(smumgr,
1945 SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable),
1946 &tmp, data->sram_end);
1947 polaris10_copy_bytes_to_smc(smumgr,
1948 tmp,
1949 (uint8_t *)&AVFS_SclkOffset,
1950 sizeof(AVFS_Sclk_Offset_t),
1951 data->sram_end);
1952
1953 data->avfs_vdroop_override_setting = (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) |
1954 (avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) |
1955 (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) |
1956 (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT);
1957 data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false;
1958 }
1959 return result;
1960}
1961
1962
1959/** 1963/**
1960* Initializes the SMC table and uploads it 1964* Initializes the SMC table and uploads it
1961* 1965*
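
Note: polaris10_populate_avfs_parameters above converts every field with PP_HOST_TO_SMC_UL/_US before copying the structs into SMC SRAM; the naming suggests the SMC consumes big-endian tables, so each host value is swapped on the way in. A sketch of that convention under the same assumption, with a hypothetical record:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical SMC-side record with big-endian fields. */
struct my_smc_entry {
        __be32 m1;
        __be16 m2;
};

static void fill_entry(struct my_smc_entry *e, u32 m1, u16 m2)
{
        e->m1 = cpu_to_be32(m1);        /* swap before the SRAM upload */
        e->m2 = cpu_to_be16(m2);
}
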
@@ -2056,6 +2060,10 @@ static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
2056 "Failed to populate Clock Stretcher Data Table!", 2060 "Failed to populate Clock Stretcher Data Table!",
2057 return result); 2061 return result);
2058 } 2062 }
2063
2064 result = polaris10_populate_avfs_parameters(hwmgr);
2065 PP_ASSERT_WITH_CODE(0 == result, "Failed to populate AVFS Parameters!", return result;);
2066
2059 table->CurrSclkPllRange = 0xff; 2067 table->CurrSclkPllRange = 0xff;
2060 table->GraphicsVoltageChangeEnable = 1; 2068 table->GraphicsVoltageChangeEnable = 1;
2061 table->GraphicsThermThrottleEnable = 1; 2069 table->GraphicsThermThrottleEnable = 1;
@@ -2252,6 +2260,9 @@ static int polaris10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
2252static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) 2260static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
2253{ 2261{
2254 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); 2262 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
2263 uint32_t soft_register_value = 0;
2264 uint32_t handshake_disables_offset = data->soft_regs_start
2265 + offsetof(SMU74_SoftRegisters, HandshakeDisables);
2255 2266
2256 /* enable SCLK dpm */ 2267 /* enable SCLK dpm */
2257 if (!data->sclk_dpm_key_disabled) 2268 if (!data->sclk_dpm_key_disabled)
@@ -2262,6 +2273,12 @@ static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
2262 2273
2263 /* enable MCLK dpm */ 2274 /* enable MCLK dpm */
2264 if (0 == data->mclk_dpm_key_disabled) { 2275 if (0 == data->mclk_dpm_key_disabled) {
2276/* Disable UVD - SMU handshake for MCLK. */
2277 soft_register_value = cgs_read_ind_register(hwmgr->device,
2278 CGS_IND_REG__SMC, handshake_disables_offset);
2279 soft_register_value |= SMU7_UVD_MCLK_HANDSHAKE_DISABLE;
2280 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
2281 handshake_disables_offset, soft_register_value);
2265 2282
2266 PP_ASSERT_WITH_CODE( 2283 PP_ASSERT_WITH_CODE(
2267 (0 == smum_send_msg_to_smc(hwmgr->smumgr, 2284 (0 == smum_send_msg_to_smc(hwmgr->smumgr,
@@ -2269,7 +2286,6 @@ static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
2269 "Failed to enable MCLK DPM during DPM Start Function!", 2286 "Failed to enable MCLK DPM during DPM Start Function!",
2270 return -1); 2287 return -1);
2271 2288
2272
2273 PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1); 2289 PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
2274 2290
2275 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5); 2291 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
@@ -2606,6 +2622,7 @@ int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
2606 2622
2607 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 2623 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2608 PHM_PlatformCaps_FanSpeedInTableIsRPM); 2624 PHM_PlatformCaps_FanSpeedInTableIsRPM);
2625
2609 if (hwmgr->chip_id == CHIP_POLARIS11) 2626 if (hwmgr->chip_id == CHIP_POLARIS11)
2610 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 2627 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2611 PHM_PlatformCaps_SPLLShutdownSupport); 2628 PHM_PlatformCaps_SPLLShutdownSupport);
@@ -2938,6 +2955,11 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
2938 data->vddci_control = POLARIS10_VOLTAGE_CONTROL_NONE; 2955 data->vddci_control = POLARIS10_VOLTAGE_CONTROL_NONE;
2939 data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_NONE; 2956 data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_NONE;
2940 2957
2958 data->enable_tdc_limit_feature = true;
2959 data->enable_pkg_pwr_tracking_feature = true;
2960 data->force_pcie_gen = PP_PCIEGenInvalid;
2961 data->mclk_stutter_mode_threshold = 40000;
2962
2941 if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, 2963 if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
2942 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) 2964 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
2943 data->voltage_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2; 2965 data->voltage_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2;
@@ -2962,6 +2984,10 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
2962 data->vddci_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2; 2984 data->vddci_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2;
2963 } 2985 }
2964 2986
2987 if (table_info->cac_dtp_table->usClockStretchAmount != 0)
2988 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2989 PHM_PlatformCaps_ClockStretcher);
2990
2965 polaris10_set_features_platform_caps(hwmgr); 2991 polaris10_set_features_platform_caps(hwmgr);
2966 2992
2967 polaris10_init_dpm_defaults(hwmgr); 2993 polaris10_init_dpm_defaults(hwmgr);
@@ -3520,10 +3546,11 @@ static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
3520 ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state; 3546 ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
3521 ATOM_Tonga_POWERPLAYTABLE *powerplay_table = 3547 ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
3522 (ATOM_Tonga_POWERPLAYTABLE *)pp_table; 3548 (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
3523 ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table = 3549 PPTable_Generic_SubTable_Header *sclk_dep_table =
3524 (ATOM_Tonga_SCLK_Dependency_Table *) 3550 (PPTable_Generic_SubTable_Header *)
3525 (((unsigned long)powerplay_table) + 3551 (((unsigned long)powerplay_table) +
3526 le16_to_cpu(powerplay_table->usSclkDependencyTableOffset)); 3552 le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
3553
3527 ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = 3554 ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
3528 (ATOM_Tonga_MCLK_Dependency_Table *) 3555 (ATOM_Tonga_MCLK_Dependency_Table *)
3529 (((unsigned long)powerplay_table) + 3556 (((unsigned long)powerplay_table) +
@@ -3575,7 +3602,11 @@ static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
3575 /* Performance levels are arranged from low to high. */ 3602 /* Performance levels are arranged from low to high. */
3576 performance_level->memory_clock = mclk_dep_table->entries 3603 performance_level->memory_clock = mclk_dep_table->entries
3577 [state_entry->ucMemoryClockIndexLow].ulMclk; 3604 [state_entry->ucMemoryClockIndexLow].ulMclk;
3578 performance_level->engine_clock = sclk_dep_table->entries 3605 if (sclk_dep_table->ucRevId == 0)
3606 performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
3607 [state_entry->ucEngineClockIndexLow].ulSclk;
3608 else if (sclk_dep_table->ucRevId == 1)
3609 performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
3579 [state_entry->ucEngineClockIndexLow].ulSclk; 3610 [state_entry->ucEngineClockIndexLow].ulSclk;
3580 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, 3611 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
3581 state_entry->ucPCIEGenLow); 3612 state_entry->ucPCIEGenLow);
@@ -3586,8 +3617,14 @@ static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
3586 [polaris10_power_state->performance_level_count++]); 3617 [polaris10_power_state->performance_level_count++]);
3587 performance_level->memory_clock = mclk_dep_table->entries 3618 performance_level->memory_clock = mclk_dep_table->entries
3588 [state_entry->ucMemoryClockIndexHigh].ulMclk; 3619 [state_entry->ucMemoryClockIndexHigh].ulMclk;
3589 performance_level->engine_clock = sclk_dep_table->entries 3620
3621 if (sclk_dep_table->ucRevId == 0)
3622 performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
3590 [state_entry->ucEngineClockIndexHigh].ulSclk; 3623 [state_entry->ucEngineClockIndexHigh].ulSclk;
3624 else if (sclk_dep_table->ucRevId == 1)
3625 performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
3626 [state_entry->ucEngineClockIndexHigh].ulSclk;
3627
3591 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, 3628 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
3592 state_entry->ucPCIEGenHigh); 3629 state_entry->ucPCIEGenHigh);
3593 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, 3630 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
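
Note: both hunks above stop assuming the Tonga SCLK-dependency layout: the code now reads a generic subtable header and casts to the Tonga or Polaris structure based on ucRevId. This header-then-cast dispatch is the usual way to handle versioned firmware tables; a reduced sketch with invented layouts:

#include <linux/types.h>

struct generic_header { u8 rev_id; };
struct table_v0 { struct generic_header hdr; u32 sclk[8]; };
struct table_v1 { struct generic_header hdr; u32 sclk[8]; u32 extra[8]; };

static u32 read_sclk(const struct generic_header *hdr, int idx)
{
        if (hdr->rev_id == 0)           /* legacy (Tonga-style) layout */
                return ((const struct table_v0 *)hdr)->sclk[idx];
        if (hdr->rev_id == 1)           /* extended (Polaris-style) layout */
                return ((const struct table_v1 *)hdr)->sclk[idx];
        return 0;                       /* unknown revision */
}
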
@@ -3645,7 +3682,6 @@ static int polaris10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
3645 switch (state->classification.ui_label) { 3682 switch (state->classification.ui_label) {
3646 case PP_StateUILabel_Performance: 3683 case PP_StateUILabel_Performance:
3647 data->use_pcie_performance_levels = true; 3684 data->use_pcie_performance_levels = true;
3648
3649 for (i = 0; i < ps->performance_level_count; i++) { 3685 for (i = 0; i < ps->performance_level_count; i++) {
3650 if (data->pcie_gen_performance.max < 3686 if (data->pcie_gen_performance.max <
3651 ps->performance_levels[i].pcie_gen) 3687 ps->performance_levels[i].pcie_gen)
@@ -3661,7 +3697,6 @@ static int polaris10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
3661 ps->performance_levels[i].pcie_lane) 3697 ps->performance_levels[i].pcie_lane)
3662 data->pcie_lane_performance.max = 3698 data->pcie_lane_performance.max =
3663 ps->performance_levels[i].pcie_lane; 3699 ps->performance_levels[i].pcie_lane;
3664
3665 if (data->pcie_lane_performance.min > 3700 if (data->pcie_lane_performance.min >
3666 ps->performance_levels[i].pcie_lane) 3701 ps->performance_levels[i].pcie_lane)
3667 data->pcie_lane_performance.min = 3702 data->pcie_lane_performance.min =
@@ -4187,12 +4222,9 @@ int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate)
4187{ 4222{
4188 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); 4223 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
4189 uint32_t mm_boot_level_offset, mm_boot_level_value; 4224 uint32_t mm_boot_level_offset, mm_boot_level_value;
4190 struct phm_ppt_v1_information *table_info =
4191 (struct phm_ppt_v1_information *)(hwmgr->pptable);
4192 4225
4193 if (!bgate) { 4226 if (!bgate) {
4194 data->smc_state_table.SamuBootLevel = 4227 data->smc_state_table.SamuBootLevel = 0;
4195 (uint8_t) (table_info->mm_dep_table->count - 1);
4196 mm_boot_level_offset = data->dpm_table_start + 4228 mm_boot_level_offset = data->dpm_table_start +
4197 offsetof(SMU74_Discrete_DpmTable, SamuBootLevel); 4229 offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
4198 mm_boot_level_offset /= 4; 4230 mm_boot_level_offset /= 4;
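
The hunks above stop assuming a single SCLK dependency layout: the subtable's ucRevId now selects between the original Tonga record and the wider Polaris record before the engine clock is read. Both layouts begin with ucRevId, which is what makes the cast-after-inspection safe. A minimal standalone sketch of the pattern follows; the struct stubs are reduced to the fields the dispatch touches and are illustrative, not the real ATOM definitions.

#include <stdint.h>
#include <stdio.h>

/* Reduced stand-ins for the ATOM tables; only ulSclk matters here. */
struct tonga_sclk_record { uint32_t ulSclk; };
struct tonga_sclk_table {
	uint8_t ucRevId;
	uint8_t ucNumEntries;
	struct tonga_sclk_record entries[1];	/* variable length in ROM */
};

struct polaris_sclk_record { uint32_t ulSclk; uint32_t ulSclkOffset; };
struct polaris_sclk_table {
	uint8_t ucRevId;
	uint8_t ucNumEntries;
	struct polaris_sclk_record entries[1];
};

/* Both layouts start with ucRevId, so it can be inspected through a
 * generic header before choosing the cast. */
struct subtable_header { uint8_t ucRevId; };

static uint32_t read_sclk(const struct subtable_header *hdr, int idx)
{
	if (hdr->ucRevId == 0)
		return ((const struct tonga_sclk_table *)hdr)->entries[idx].ulSclk;
	if (hdr->ucRevId == 1)
		return ((const struct polaris_sclk_table *)hdr)->entries[idx].ulSclk;
	return 0;	/* unknown revision */
}

int main(void)
{
	struct tonga_sclk_table t = { 0, 1, { { 30750000u } } };

	printf("sclk=%u\n", read_sclk((const struct subtable_header *)&t, 0));
	return 0;
}
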
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h
index beedf35cbfa6..d717789441f5 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h
@@ -312,6 +312,9 @@ struct polaris10_hwmgr {
312 312
313 /* soft pptable for re-uploading into smu */ 313 /* soft pptable for re-uploading into smu */
314 void *soft_pp_table; 314 void *soft_pp_table;
315
316 uint32_t avfs_vdroop_override_setting;
317 bool apply_avfs_cks_off_voltage;
315}; 318};
316 319
317/* To convert to Q8.8 format for firmware */ 320/* To convert to Q8.8 format for firmware */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c
index 0b99ab3ba0c5..ae96f14b827c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c
@@ -286,7 +286,7 @@ int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr)
286 286
287 if (polaris10_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, 287 if (polaris10_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
288 (uint8_t *)&data->power_tune_table, 288 (uint8_t *)&data->power_tune_table,
289 sizeof(struct SMU74_Discrete_PmFuses), data->sram_end)) 289 (sizeof(struct SMU74_Discrete_PmFuses) - 92), data->sram_end))
290 PP_ASSERT_WITH_CODE(false, 290 PP_ASSERT_WITH_CODE(false,
291 "Attempt to download PmFuseTable Failed!", 291 "Attempt to download PmFuseTable Failed!",
292 return -EINVAL); 292 return -EINVAL);
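
The PmFuses upload above shrinks the copied region by a literal 92 bytes. If the boundary corresponds to a known trailing field, offsetof() can name it instead of a magic number; the sketch below assumes a hypothetical TrailingRegion marker purely for illustration, since this section does not show which fields the 92 bytes cover.

#include <stddef.h>
#include <stdint.h>

/* Illustrative only: "TrailingRegion" is a hypothetical marker for
 * whatever the last 92 bytes of SMU74_Discrete_PmFuses hold; the real
 * layout lives in smu74_discrete.h. */
struct pm_fuses_example {
	uint8_t head[64];
	uint8_t TrailingRegion[92];
};

/* offsetof() names the boundary instead of hard-coding "- 92". */
#define PM_FUSES_COPY_SIZE	offsetof(struct pm_fuses_example, TrailingRegion)

_Static_assert(PM_FUSES_COPY_SIZE == sizeof(struct pm_fuses_example) - 92,
	       "copy size must exclude exactly the trailing 92 bytes");
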
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c
index aba167f7d167..b206632d4650 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c
@@ -625,10 +625,14 @@ static int tf_polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
625 int ret; 625 int ret;
626 struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr); 626 struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
627 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); 627 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
628 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
628 629
629 if (smu_data->avfs.avfs_btc_status != AVFS_BTC_ENABLEAVFS) 630 if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
630 return 0; 631 return 0;
631 632
633 ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
634 PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
635
632 ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ? 636 ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ?
633 0 : -1; 637 0 : -1;
634 638
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
index 58742e0d1492..a3c38bbd1e94 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
@@ -44,6 +44,20 @@ bool acpi_atcs_functions_supported(void *device, uint32_t index)
44 return result == 0 ? (output_buf.function_bits & (1 << (index - 1))) != 0 : false; 44 return result == 0 ? (output_buf.function_bits & (1 << (index - 1))) != 0 : false;
45} 45}
46 46
47bool acpi_atcs_notify_pcie_device_ready(void *device)
48{
49 int32_t temp_buffer = 1;
50
51 return cgs_call_acpi_method(device, CGS_ACPI_METHOD_ATCS,
52 ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION,
53 &temp_buffer,
54 NULL,
55 0,
56 sizeof(temp_buffer),
57 0);
58}
59
60
47int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise) 61int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise)
48{ 62{
49 struct atcs_pref_req_input atcs_input; 63 struct atcs_pref_req_input atcs_input;
@@ -52,7 +66,7 @@ int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise)
52 int result; 66 int result;
53 struct cgs_system_info info = {0}; 67 struct cgs_system_info info = {0};
54 68
55 if (!acpi_atcs_functions_supported(device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST)) 69 if( 0 != acpi_atcs_notify_pcie_device_ready(device))
56 return -EINVAL; 70 return -EINVAL;
57 71
58 info.size = sizeof(struct cgs_system_info); 72 info.size = sizeof(struct cgs_system_info);
@@ -77,7 +91,7 @@ int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise)
77 ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST, 91 ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST,
78 &atcs_input, 92 &atcs_input,
79 &atcs_output, 93 &atcs_output,
80 0, 94 1,
81 sizeof(atcs_input), 95 sizeof(atcs_input),
82 sizeof(atcs_output)); 96 sizeof(atcs_output));
83 if (result != 0) 97 if (result != 0)
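
acpi_pcie_perf_request() now fails fast unless the new ATCS device-ready notification succeeds, and the handshake argument to cgs_call_acpi_method() changes from 0 to 1. A stubbed sketch of the resulting call ordering, with placeholder functions standing in for the cgs/ATCS plumbing:

#include <stdio.h>

/* Stubs standing in for the cgs/ATCS calls; 0 means success. */
static int notify_pcie_device_ready(void *dev) { (void)dev; return 0; }
static int send_perf_request(void *dev, unsigned req) { (void)dev; (void)req; return 0; }

/* Mirrors the new ordering: no performance request is issued until the
 * platform has acknowledged the device-ready notification. */
static int pcie_perf_request(void *dev, unsigned req)
{
	if (notify_pcie_device_ready(dev) != 0)
		return -1;
	return send_perf_request(dev, req);
}

int main(void)
{
	printf("result=%d\n", pcie_perf_request(NULL, 2));
	return 0;
}
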
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index da9f5f1b6dc2..bf4e18fd3872 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -1302,3 +1302,46 @@ int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctr
1302 1302
1303 return 0; 1303 return 0;
1304} 1304}
1305
1306int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param)
1307{
1308 ATOM_ASIC_PROFILING_INFO_V3_6 *profile = NULL;
1309
1310 if (param == NULL)
1311 return -EINVAL;
1312
1313 profile = (ATOM_ASIC_PROFILING_INFO_V3_6 *)
1314 cgs_atom_get_data_table(hwmgr->device,
1315 GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo),
1316 NULL, NULL, NULL);
1317 if (!profile)
1318 return -1;
1319
1320 param->ulAVFS_meanNsigma_Acontant0 = profile->ulAVFS_meanNsigma_Acontant0;
1321 param->ulAVFS_meanNsigma_Acontant1 = profile->ulAVFS_meanNsigma_Acontant1;
1322 param->ulAVFS_meanNsigma_Acontant2 = profile->ulAVFS_meanNsigma_Acontant2;
1323 param->usAVFS_meanNsigma_DC_tol_sigma = profile->usAVFS_meanNsigma_DC_tol_sigma;
1324 param->usAVFS_meanNsigma_Platform_mean = profile->usAVFS_meanNsigma_Platform_mean;
1325 param->usAVFS_meanNsigma_Platform_sigma = profile->usAVFS_meanNsigma_Platform_sigma;
1326 param->ulGB_VDROOP_TABLE_CKSOFF_a0 = profile->ulGB_VDROOP_TABLE_CKSOFF_a0;
1327 param->ulGB_VDROOP_TABLE_CKSOFF_a1 = profile->ulGB_VDROOP_TABLE_CKSOFF_a1;
1328 param->ulGB_VDROOP_TABLE_CKSOFF_a2 = profile->ulGB_VDROOP_TABLE_CKSOFF_a2;
1329 param->ulGB_VDROOP_TABLE_CKSON_a0 = profile->ulGB_VDROOP_TABLE_CKSON_a0;
1330 param->ulGB_VDROOP_TABLE_CKSON_a1 = profile->ulGB_VDROOP_TABLE_CKSON_a1;
1331 param->ulGB_VDROOP_TABLE_CKSON_a2 = profile->ulGB_VDROOP_TABLE_CKSON_a2;
1332 param->ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = profile->ulAVFSGB_FUSE_TABLE_CKSOFF_m1;
1333 param->usAVFSGB_FUSE_TABLE_CKSOFF_m2 = profile->usAVFSGB_FUSE_TABLE_CKSOFF_m2;
1334 param->ulAVFSGB_FUSE_TABLE_CKSOFF_b = profile->ulAVFSGB_FUSE_TABLE_CKSOFF_b;
1335 param->ulAVFSGB_FUSE_TABLE_CKSON_m1 = profile->ulAVFSGB_FUSE_TABLE_CKSON_m1;
1336 param->usAVFSGB_FUSE_TABLE_CKSON_m2 = profile->usAVFSGB_FUSE_TABLE_CKSON_m2;
1337 param->ulAVFSGB_FUSE_TABLE_CKSON_b = profile->ulAVFSGB_FUSE_TABLE_CKSON_b;
1338 param->usMaxVoltage_0_25mv = profile->usMaxVoltage_0_25mv;
1339 param->ucEnableGB_VDROOP_TABLE_CKSOFF = profile->ucEnableGB_VDROOP_TABLE_CKSOFF;
1340 param->ucEnableGB_VDROOP_TABLE_CKSON = profile->ucEnableGB_VDROOP_TABLE_CKSON;
1341 param->ucEnableGB_FUSE_TABLE_CKSOFF = profile->ucEnableGB_FUSE_TABLE_CKSOFF;
1342 param->ucEnableGB_FUSE_TABLE_CKSON = profile->ucEnableGB_FUSE_TABLE_CKSON;
1343 param->usPSM_Age_ComFactor = profile->usPSM_Age_ComFactor;
1344 param->ucEnableApplyAVFS_CKS_OFF_Voltage = profile->ucEnableApplyAVFS_CKS_OFF_Voltage;
1345
1346 return 0;
1347}
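
atomctrl_get_avfs_information() is a guard-then-copy routine: two NULL checks, then a field-by-field transfer from the ROM profile into the driver-owned parameter block. The per-field copy (rather than a raw memcpy) is the safe choice here because the ATOM source is byte-packed firmware layout while the driver struct uses natural C alignment, so the two memory layouts need not match. A reduced sketch with illustrative two-field structs:

#include <stdint.h>

/* Reduced illustrations of the ROM-side and driver-side structs. */
struct rom_profile { uint32_t a0; uint16_t tol; };	/* packed in ROM */
struct avfs_params { uint32_t a0; uint16_t tol; };

static int get_avfs_information(const struct rom_profile *profile,
				struct avfs_params *param)
{
	if (param == NULL)
		return -1;
	if (profile == NULL)
		return -1;

	/* Field-by-field, never memcpy, across the packing boundary. */
	param->a0  = profile->a0;
	param->tol = profile->tol;
	return 0;
}
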
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
index d24ebb566905..248c5db5f380 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
@@ -250,6 +250,35 @@ struct pp_atomctrl_gpio_pin_assignment {
250}; 250};
251typedef struct pp_atomctrl_gpio_pin_assignment pp_atomctrl_gpio_pin_assignment; 251typedef struct pp_atomctrl_gpio_pin_assignment pp_atomctrl_gpio_pin_assignment;
252 252
253struct pp_atom_ctrl__avfs_parameters {
254 uint32_t ulAVFS_meanNsigma_Acontant0;
255 uint32_t ulAVFS_meanNsigma_Acontant1;
256 uint32_t ulAVFS_meanNsigma_Acontant2;
257 uint16_t usAVFS_meanNsigma_DC_tol_sigma;
258 uint16_t usAVFS_meanNsigma_Platform_mean;
259 uint16_t usAVFS_meanNsigma_Platform_sigma;
260 uint32_t ulGB_VDROOP_TABLE_CKSOFF_a0;
261 uint32_t ulGB_VDROOP_TABLE_CKSOFF_a1;
262 uint32_t ulGB_VDROOP_TABLE_CKSOFF_a2;
263 uint32_t ulGB_VDROOP_TABLE_CKSON_a0;
264 uint32_t ulGB_VDROOP_TABLE_CKSON_a1;
265 uint32_t ulGB_VDROOP_TABLE_CKSON_a2;
266 uint32_t ulAVFSGB_FUSE_TABLE_CKSOFF_m1;
267 uint16_t usAVFSGB_FUSE_TABLE_CKSOFF_m2;
268 uint32_t ulAVFSGB_FUSE_TABLE_CKSOFF_b;
269 uint32_t ulAVFSGB_FUSE_TABLE_CKSON_m1;
270 uint16_t usAVFSGB_FUSE_TABLE_CKSON_m2;
271 uint32_t ulAVFSGB_FUSE_TABLE_CKSON_b;
272 uint16_t usMaxVoltage_0_25mv;
273 uint8_t ucEnableGB_VDROOP_TABLE_CKSOFF;
274 uint8_t ucEnableGB_VDROOP_TABLE_CKSON;
275 uint8_t ucEnableGB_FUSE_TABLE_CKSOFF;
276 uint8_t ucEnableGB_FUSE_TABLE_CKSON;
277 uint16_t usPSM_Age_ComFactor;
278 uint8_t ucEnableApplyAVFS_CKS_OFF_Voltage;
279 uint8_t ucReserved;
280};
281
253extern bool atomctrl_get_pp_assign_pin(struct pp_hwmgr *hwmgr, const uint32_t pinId, pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment); 282extern bool atomctrl_get_pp_assign_pin(struct pp_hwmgr *hwmgr, const uint32_t pinId, pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment);
254extern int atomctrl_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage); 283extern int atomctrl_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage);
255extern uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr); 284extern uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr);
@@ -278,5 +307,8 @@ extern int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clo
278extern int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type, 307extern int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
279 uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage); 308 uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage);
280extern int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table); 309extern int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table);
310
311extern int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param);
312
281#endif 313#endif
282 314
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
index 16fed487973b..233eb7f36c1d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
@@ -2847,27 +2847,6 @@ static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
2847 } 2847 }
2848 } 2848 }
2849 2849
2850 /* Initialize Vddc DPM table based on allow Vddc values. And populate corresponding std values. */
2851 for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
2852 data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddc;
2853 /* tonga_hwmgr->dpm_table.VddcTable.dpm_levels[i].param1 = stdVoltageTable->entries[i].Leakage; */
2854 /* param1 is for corresponding std voltage */
2855 data->dpm_table.vddc_table.dpm_levels[i].enabled = 1;
2856 }
2857 data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
2858
2859 if (NULL != allowed_vdd_mclk_table) {
2860 /* Initialize Vddci DPM table based on allow Mclk values */
2861 for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
2862 data->dpm_table.vdd_ci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddci;
2863 data->dpm_table.vdd_ci_table.dpm_levels[i].enabled = 1;
2864 data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].mvdd;
2865 data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1;
2866 }
2867 data->dpm_table.vdd_ci_table.count = allowed_vdd_mclk_table->count;
2868 data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
2869 }
2870
2871 /* setup PCIE gen speed levels*/ 2850 /* setup PCIE gen speed levels*/
2872 tonga_setup_default_pcie_tables(hwmgr); 2851 tonga_setup_default_pcie_tables(hwmgr);
2873 2852
@@ -4510,6 +4489,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
4510 data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_NONE; 4489 data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_NONE;
4511 data->vdd_gfx_control = TONGA_VOLTAGE_CONTROL_NONE; 4490 data->vdd_gfx_control = TONGA_VOLTAGE_CONTROL_NONE;
4512 data->mvdd_control = TONGA_VOLTAGE_CONTROL_NONE; 4491 data->mvdd_control = TONGA_VOLTAGE_CONTROL_NONE;
4492 data->force_pcie_gen = PP_PCIEGenInvalid;
4513 4493
4514 if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, 4494 if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
4515 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) { 4495 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h
index 1b44f4e9b8f5..f127198aafc4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h
@@ -197,6 +197,22 @@ typedef struct _ATOM_Tonga_SCLK_Dependency_Table {
197 ATOM_Tonga_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ 197 ATOM_Tonga_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
198} ATOM_Tonga_SCLK_Dependency_Table; 198} ATOM_Tonga_SCLK_Dependency_Table;
199 199
200typedef struct _ATOM_Polaris_SCLK_Dependency_Record {
201 UCHAR ucVddInd; /* Base voltage */
202 USHORT usVddcOffset; /* Offset relative to base voltage */
203 ULONG ulSclk;
204 USHORT usEdcCurrent;
205 UCHAR ucReliabilityTemperature;
206 UCHAR ucCKSVOffsetandDisable; /* Bits 0~6: Voltage offset for CKS, Bit 7: Disable/enable for the SCLK level. */
207 ULONG ulSclkOffset;
208} ATOM_Polaris_SCLK_Dependency_Record;
209
210typedef struct _ATOM_Polaris_SCLK_Dependency_Table {
211 UCHAR ucRevId;
212 UCHAR ucNumEntries; /* Number of entries. */
213 ATOM_Polaris_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
214} ATOM_Polaris_SCLK_Dependency_Table;
215
200typedef struct _ATOM_Tonga_PCIE_Record { 216typedef struct _ATOM_Tonga_PCIE_Record {
201 UCHAR ucPCIEGenSpeed; 217 UCHAR ucPCIEGenSpeed;
202 UCHAR usPCIELaneWidth; 218 UCHAR usPCIELaneWidth;
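
Both SCLK dependency tables end in entries[1] sized at runtime by ucNumEntries, the pre-C99 flexible-array idiom used throughout these ATOM headers. A standalone example of allocating and walking such a table:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct record { uint32_t sclk; };
struct table {
	uint8_t num_entries;
	struct record entries[1];	/* really num_entries records */
};

int main(void)
{
	/* Allocate room for 3 records despite the [1] declaration. */
	size_t sz = sizeof(struct table) + 2 * sizeof(struct record);
	struct table *t = calloc(1, sz);

	if (!t)
		return 1;
	t->num_entries = 3;
	for (int i = 0; i < t->num_entries; i++)
		t->entries[i].sclk = 10000u * (i + 1);
	for (int i = 0; i < t->num_entries; i++)
		printf("entry %d: %u\n", i, t->entries[i].sclk);
	free(t);
	return 0;
}
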
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
index 10e3630ee39d..671fdb4d615a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
@@ -408,41 +408,78 @@ static int get_mclk_voltage_dependency_table(
408static int get_sclk_voltage_dependency_table( 408static int get_sclk_voltage_dependency_table(
409 struct pp_hwmgr *hwmgr, 409 struct pp_hwmgr *hwmgr,
410 phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_sclk_dep_table, 410 phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_sclk_dep_table,
411 const ATOM_Tonga_SCLK_Dependency_Table * sclk_dep_table 411 const PPTable_Generic_SubTable_Header *sclk_dep_table
412 ) 412 )
413{ 413{
414 uint32_t table_size, i; 414 uint32_t table_size, i;
415 phm_ppt_v1_clock_voltage_dependency_table *sclk_table; 415 phm_ppt_v1_clock_voltage_dependency_table *sclk_table;
416 416
417 PP_ASSERT_WITH_CODE((0 != sclk_dep_table->ucNumEntries), 417 if (sclk_dep_table->ucRevId < 1) {
418 "Invalid PowerPlay Table!", return -1); 418 const ATOM_Tonga_SCLK_Dependency_Table *tonga_table =
419 (ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table;
419 420
420 table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record) 421 PP_ASSERT_WITH_CODE((0 != tonga_table->ucNumEntries),
421 * sclk_dep_table->ucNumEntries; 422 "Invalid PowerPlay Table!", return -1);
422 423
423 sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *) 424 table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
424 kzalloc(table_size, GFP_KERNEL); 425 * tonga_table->ucNumEntries;
425 426
426 if (NULL == sclk_table) 427 sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
427 return -ENOMEM; 428 kzalloc(table_size, GFP_KERNEL);
428 429
429 memset(sclk_table, 0x00, table_size); 430 if (NULL == sclk_table)
430 431 return -ENOMEM;
431 sclk_table->count = (uint32_t)sclk_dep_table->ucNumEntries; 432
432 433 memset(sclk_table, 0x00, table_size);
433 for (i = 0; i < sclk_dep_table->ucNumEntries; i++) { 434
434 sclk_table->entries[i].vddInd = 435 sclk_table->count = (uint32_t)tonga_table->ucNumEntries;
435 sclk_dep_table->entries[i].ucVddInd; 436
436 sclk_table->entries[i].vdd_offset = 437 for (i = 0; i < tonga_table->ucNumEntries; i++) {
437 sclk_dep_table->entries[i].usVddcOffset; 438 sclk_table->entries[i].vddInd =
438 sclk_table->entries[i].clk = 439 tonga_table->entries[i].ucVddInd;
439 sclk_dep_table->entries[i].ulSclk; 440 sclk_table->entries[i].vdd_offset =
440 sclk_table->entries[i].cks_enable = 441 tonga_table->entries[i].usVddcOffset;
441 (((sclk_dep_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0; 442 sclk_table->entries[i].clk =
442 sclk_table->entries[i].cks_voffset = 443 tonga_table->entries[i].ulSclk;
443 (sclk_dep_table->entries[i].ucCKSVOffsetandDisable & 0x7F); 444 sclk_table->entries[i].cks_enable =
444 } 445 (((tonga_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
446 sclk_table->entries[i].cks_voffset =
447 (tonga_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
448 }
449 } else {
450 const ATOM_Polaris_SCLK_Dependency_Table *polaris_table =
451 (ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table;
452
453 PP_ASSERT_WITH_CODE((0 != polaris_table->ucNumEntries),
454 "Invalid PowerPlay Table!", return -1);
455
456 table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
457 * polaris_table->ucNumEntries;
458
459 sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
460 kzalloc(table_size, GFP_KERNEL);
445 461
462 if (NULL == sclk_table)
463 return -ENOMEM;
464
465 memset(sclk_table, 0x00, table_size);
466
467 sclk_table->count = (uint32_t)polaris_table->ucNumEntries;
468
469 for (i = 0; i < polaris_table->ucNumEntries; i++) {
470 sclk_table->entries[i].vddInd =
471 polaris_table->entries[i].ucVddInd;
472 sclk_table->entries[i].vdd_offset =
473 polaris_table->entries[i].usVddcOffset;
474 sclk_table->entries[i].clk =
475 polaris_table->entries[i].ulSclk;
476 sclk_table->entries[i].cks_enable =
477 (((polaris_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
478 sclk_table->entries[i].cks_voffset =
479 (polaris_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
480 sclk_table->entries[i].sclk_offset = polaris_table->entries[i].ulSclkOffset;
481 }
482 }
446 *pp_tonga_sclk_dep_table = sclk_table; 483 *pp_tonga_sclk_dep_table = sclk_table;
447 484
448 return 0; 485 return 0;
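
The Tonga and Polaris conversion loops above duplicate the ucCKSVOffsetandDisable decode: bit 7 disables the level, and bits 0..6 carry the CKS voltage offset. A small helper makes the shared decode explicit; this is an illustrative extraction, not something the patch itself does:

#include <stdint.h>
#include <stdio.h>

/* Bit 7 of ucCKSVOffsetandDisable disables the level; bits 0..6 are
 * the CKS voltage offset. Shared by the Tonga and Polaris loops. */
static void decode_cks(uint8_t raw, uint8_t *enable, uint8_t *voffset)
{
	*enable  = (raw & 0x80) ? 0 : 1;
	*voffset = raw & 0x7F;
}

int main(void)
{
	uint8_t en, off;

	decode_cks(0x85, &en, &off);
	printf("enable=%u voffset=%u\n", en, off);	/* enable=0 voffset=5 */
	return 0;
}
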
@@ -708,8 +745,8 @@ static int init_clock_voltage_dependency(
708 const ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = 745 const ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
709 (const ATOM_Tonga_MCLK_Dependency_Table *)(((unsigned long) powerplay_table) + 746 (const ATOM_Tonga_MCLK_Dependency_Table *)(((unsigned long) powerplay_table) +
710 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset)); 747 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
711 const ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table = 748 const PPTable_Generic_SubTable_Header *sclk_dep_table =
712 (const ATOM_Tonga_SCLK_Dependency_Table *)(((unsigned long) powerplay_table) + 749 (const PPTable_Generic_SubTable_Header *)(((unsigned long) powerplay_table) +
713 le16_to_cpu(powerplay_table->usSclkDependencyTableOffset)); 750 le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
714 const ATOM_Tonga_Hard_Limit_Table *pHardLimits = 751 const ATOM_Tonga_Hard_Limit_Table *pHardLimits =
715 (const ATOM_Tonga_Hard_Limit_Table *)(((unsigned long) powerplay_table) + 752 (const ATOM_Tonga_Hard_Limit_Table *)(((unsigned long) powerplay_table) +
@@ -1040,48 +1077,44 @@ int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
1040 struct phm_ppt_v1_information *pp_table_information = 1077 struct phm_ppt_v1_information *pp_table_information =
1041 (struct phm_ppt_v1_information *)(hwmgr->pptable); 1078 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1042 1079
1043 if (NULL != hwmgr->soft_pp_table) { 1080 if (NULL != hwmgr->soft_pp_table)
1044 kfree(hwmgr->soft_pp_table);
1045 hwmgr->soft_pp_table = NULL; 1081 hwmgr->soft_pp_table = NULL;
1046 }
1047 1082
1048 if (NULL != pp_table_information->vdd_dep_on_sclk) 1083 kfree(pp_table_information->vdd_dep_on_sclk);
1049 pp_table_information->vdd_dep_on_sclk = NULL; 1084 pp_table_information->vdd_dep_on_sclk = NULL;
1050 1085
1051 if (NULL != pp_table_information->vdd_dep_on_mclk) 1086 kfree(pp_table_information->vdd_dep_on_mclk);
1052 pp_table_information->vdd_dep_on_mclk = NULL; 1087 pp_table_information->vdd_dep_on_mclk = NULL;
1053 1088
1054 if (NULL != pp_table_information->valid_mclk_values) 1089 kfree(pp_table_information->valid_mclk_values);
1055 pp_table_information->valid_mclk_values = NULL; 1090 pp_table_information->valid_mclk_values = NULL;
1056 1091
1057 if (NULL != pp_table_information->valid_sclk_values) 1092 kfree(pp_table_information->valid_sclk_values);
1058 pp_table_information->valid_sclk_values = NULL; 1093 pp_table_information->valid_sclk_values = NULL;
1059 1094
1060 if (NULL != pp_table_information->vddc_lookup_table) 1095 kfree(pp_table_information->vddc_lookup_table);
1061 pp_table_information->vddc_lookup_table = NULL; 1096 pp_table_information->vddc_lookup_table = NULL;
1062 1097
1063 if (NULL != pp_table_information->vddgfx_lookup_table) 1098 kfree(pp_table_information->vddgfx_lookup_table);
1064 pp_table_information->vddgfx_lookup_table = NULL; 1099 pp_table_information->vddgfx_lookup_table = NULL;
1065 1100
1066 if (NULL != pp_table_information->mm_dep_table) 1101 kfree(pp_table_information->mm_dep_table);
1067 pp_table_information->mm_dep_table = NULL; 1102 pp_table_information->mm_dep_table = NULL;
1068 1103
1069 if (NULL != pp_table_information->cac_dtp_table) 1104 kfree(pp_table_information->cac_dtp_table);
1070 pp_table_information->cac_dtp_table = NULL; 1105 pp_table_information->cac_dtp_table = NULL;
1071 1106
1072 if (NULL != hwmgr->dyn_state.cac_dtp_table) 1107 kfree(hwmgr->dyn_state.cac_dtp_table);
1073 hwmgr->dyn_state.cac_dtp_table = NULL; 1108 hwmgr->dyn_state.cac_dtp_table = NULL;
1074 1109
1075 if (NULL != pp_table_information->ppm_parameter_table) 1110 kfree(pp_table_information->ppm_parameter_table);
1076 pp_table_information->ppm_parameter_table = NULL; 1111 pp_table_information->ppm_parameter_table = NULL;
1077 1112
1078 if (NULL != pp_table_information->pcie_table) 1113 kfree(pp_table_information->pcie_table);
1079 pp_table_information->pcie_table = NULL; 1114 pp_table_information->pcie_table = NULL;
1080 1115
1081 if (NULL != hwmgr->pptable) { 1116 kfree(hwmgr->pptable);
1082 kfree(hwmgr->pptable); 1117 hwmgr->pptable = NULL;
1083 hwmgr->pptable = NULL;
1084 }
1085 1118
1086 return result; 1119 return result;
1087} 1120}
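
The uninitialize rework relies on kfree(NULL) being a defined no-op, so every "if (NULL != p)" guard before a free can go; only soft_pp_table changes meaning, which is now cleared without being freed, presumably because ownership moved elsewhere. A userspace analogue using free(), which has the same NULL-is-a-no-op guarantee:

#include <stdlib.h>

struct ctx { int *a; int *b; };

/* free(NULL), like kfree(NULL), is a defined no-op, so no NULL guard
 * is needed before each release; clearing the pointer afterwards
 * keeps a repeated release harmless. */
static void ctx_release(struct ctx *c)
{
	free(c->a);
	c->a = NULL;
	free(c->b);
	c->b = NULL;
}

int main(void)
{
	struct ctx c = { malloc(sizeof(int)), NULL };

	ctx_release(&c);	/* freeing c.b == NULL is fine */
	return 0;
}
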
diff --git a/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h
index 0c6a413eaa5b..d41d37ab5b7c 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h
@@ -27,6 +27,7 @@
27 27
28#pragma pack(push, 1) 28#pragma pack(push, 1)
29 29
30#define PPSMC_MSG_SetGBDroopSettings ((uint16_t) 0x305)
30 31
31#define PPSMC_SWSTATE_FLAG_DC 0x01 32#define PPSMC_SWSTATE_FLAG_DC 0x01
32#define PPSMC_SWSTATE_FLAG_UVD 0x02 33#define PPSMC_SWSTATE_FLAG_UVD 0x02
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h b/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
index 3bd5e69b9045..3df5de2cdab0 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
@@ -26,3 +26,4 @@ extern bool acpi_atcs_functions_supported(void *device,
26extern int acpi_pcie_perf_request(void *device, 26extern int acpi_pcie_perf_request(void *device,
27 uint8_t perf_req, 27 uint8_t perf_req,
28 bool advertise); 28 bool advertise);
29extern bool acpi_atcs_notify_pcie_device_ready(void *device);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu74.h b/drivers/gpu/drm/amd/powerplay/inc/smu74.h
index 1a12d85b8e97..fd10a9fa843d 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu74.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu74.h
@@ -34,6 +34,30 @@
34#define SMU__NUM_LCLK_DPM_LEVELS 8 34#define SMU__NUM_LCLK_DPM_LEVELS 8
35#define SMU__NUM_PCIE_DPM_LEVELS 8 35#define SMU__NUM_PCIE_DPM_LEVELS 8
36 36
37#define EXP_M1 35
38#define EXP_M2 92821
39#define EXP_B 66629747
40
41#define EXP_M1_1 365
42#define EXP_M2_1 658700
43#define EXP_B_1 305506134
44
45#define EXP_M1_2 189
46#define EXP_M2_2 379692
47#define EXP_B_2 194609469
48
49#define EXP_M1_3 99
50#define EXP_M2_3 217915
51#define EXP_B_3 122255994
52
53#define EXP_M1_4 51
54#define EXP_M2_4 122643
55#define EXP_B_4 74893384
56
57#define EXP_M1_5 423
58#define EXP_M2_5 1103326
59#define EXP_B_5 728122621
60
37enum SID_OPTION { 61enum SID_OPTION {
38 SID_OPTION_HI, 62 SID_OPTION_HI,
39 SID_OPTION_LO, 63 SID_OPTION_LO,
@@ -548,20 +572,20 @@ struct SMU74_Firmware_Header {
548 uint32_t CacConfigTable; 572 uint32_t CacConfigTable;
549 uint32_t CacStatusTable; 573 uint32_t CacStatusTable;
550 574
551
552 uint32_t mcRegisterTable; 575 uint32_t mcRegisterTable;
553 576
554
555 uint32_t mcArbDramTimingTable; 577 uint32_t mcArbDramTimingTable;
556 578
557
558
559
560 uint32_t PmFuseTable; 579 uint32_t PmFuseTable;
561 uint32_t Globals; 580 uint32_t Globals;
562 uint32_t ClockStretcherTable; 581 uint32_t ClockStretcherTable;
563 uint32_t VftTable; 582 uint32_t VftTable;
564 uint32_t Reserved[21]; 583 uint32_t Reserved1;
584 uint32_t AvfsTable;
585 uint32_t AvfsCksOffGbvTable;
586 uint32_t AvfsMeanNSigma;
587 uint32_t AvfsSclkOffsetTable;
588 uint32_t Reserved[16];
565 uint32_t Signature; 589 uint32_t Signature;
566}; 590};
567 591
@@ -701,8 +725,6 @@ VR Config info is contained in dpmTable.VRConfig */
701struct SMU_ClockStretcherDataTableEntry { 725struct SMU_ClockStretcherDataTableEntry {
702 uint8_t minVID; 726 uint8_t minVID;
703 uint8_t maxVID; 727 uint8_t maxVID;
704
705
706 uint16_t setting; 728 uint16_t setting;
707}; 729};
708typedef struct SMU_ClockStretcherDataTableEntry SMU_ClockStretcherDataTableEntry; 730typedef struct SMU_ClockStretcherDataTableEntry SMU_ClockStretcherDataTableEntry;
@@ -769,6 +791,43 @@ struct VFT_TABLE_t {
769typedef struct VFT_TABLE_t VFT_TABLE_t; 791typedef struct VFT_TABLE_t VFT_TABLE_t;
770 792
771 793
794/* Total margin, root mean square of Fmax + DC + Platform */
795struct AVFS_Margin_t {
796 VFT_CELL_t Cell[NUM_VFT_COLUMNS];
797};
798typedef struct AVFS_Margin_t AVFS_Margin_t;
799
800#define BTCGB_VDROOP_TABLE_MAX_ENTRIES 2
801#define AVFSGB_VDROOP_TABLE_MAX_ENTRIES 2
802
803struct GB_VDROOP_TABLE_t {
804 int32_t a0;
805 int32_t a1;
806 int32_t a2;
807 uint32_t spare;
808};
809typedef struct GB_VDROOP_TABLE_t GB_VDROOP_TABLE_t;
810
811struct AVFS_CksOff_Gbv_t {
812 VFT_CELL_t Cell[NUM_VFT_COLUMNS];
813};
814typedef struct AVFS_CksOff_Gbv_t AVFS_CksOff_Gbv_t;
815
816struct AVFS_meanNsigma_t {
817 uint32_t Aconstant[3];
818 uint16_t DC_tol_sigma;
819 uint16_t Platform_mean;
820 uint16_t Platform_sigma;
821 uint16_t PSM_Age_CompFactor;
822 uint8_t Static_Voltage_Offset[NUM_VFT_COLUMNS];
823};
824typedef struct AVFS_meanNsigma_t AVFS_meanNsigma_t;
825
826struct AVFS_Sclk_Offset_t {
827 uint16_t Sclk_Offset[8];
828};
829typedef struct AVFS_Sclk_Offset_t AVFS_Sclk_Offset_t;
830
772#endif 831#endif
773 832
774 833
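
The firmware header edit splits the old Reserved[21] into Reserved1, four AVFS table pointers, and Reserved[16]; 1 + 4 + 16 = 21 words, so the Signature offset is unchanged. That invariant is worth pinning down with a compile-time check; the structs below are reduced stand-ins for the two revisions of SMU74_Firmware_Header:

#include <stddef.h>
#include <stdint.h>

struct hdr_old {
	uint32_t VftTable;
	uint32_t Reserved[21];
	uint32_t Signature;
};

struct hdr_new {
	uint32_t VftTable;
	uint32_t Reserved1;
	uint32_t AvfsTable;
	uint32_t AvfsCksOffGbvTable;
	uint32_t AvfsMeanNSigma;
	uint32_t AvfsSclkOffsetTable;
	uint32_t Reserved[16];
	uint32_t Signature;
};

/* 1 + 4 + 16 reserved words == the old 21, so Signature cannot move. */
_Static_assert(offsetof(struct hdr_old, Signature) ==
	       offsetof(struct hdr_new, Signature),
	       "Signature offset must be stable across header revisions");
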
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h b/drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h
index 0dfe82336dc7..b85ff5400e57 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h
@@ -223,6 +223,16 @@ struct SMU74_Discrete_StateInfo {
223 223
224typedef struct SMU74_Discrete_StateInfo SMU74_Discrete_StateInfo; 224typedef struct SMU74_Discrete_StateInfo SMU74_Discrete_StateInfo;
225 225
226struct SMU_QuadraticCoeffs {
227 int32_t m1;
228 uint32_t b;
229
230 int16_t m2;
231 uint8_t m1_shift;
232 uint8_t m2_shift;
233};
234typedef struct SMU_QuadraticCoeffs SMU_QuadraticCoeffs;
235
226struct SMU74_Discrete_DpmTable { 236struct SMU74_Discrete_DpmTable {
227 237
228 SMU74_PIDController GraphicsPIDController; 238 SMU74_PIDController GraphicsPIDController;
@@ -258,7 +268,14 @@ struct SMU74_Discrete_DpmTable {
258 uint8_t ThermOutPolarity; 268 uint8_t ThermOutPolarity;
259 uint8_t ThermOutMode; 269 uint8_t ThermOutMode;
260 uint8_t BootPhases; 270 uint8_t BootPhases;
261 uint32_t Reserved[4]; 271
272 uint8_t VRHotLevel;
273 uint8_t Reserved1[3];
274 uint16_t FanStartTemperature;
275 uint16_t FanStopTemperature;
276 uint16_t MaxVoltage;
277 uint16_t Reserved2;
278 uint32_t Reserved[1];
262 279
263 SMU74_Discrete_GraphicsLevel GraphicsLevel[SMU74_MAX_LEVELS_GRAPHICS]; 280 SMU74_Discrete_GraphicsLevel GraphicsLevel[SMU74_MAX_LEVELS_GRAPHICS];
264 SMU74_Discrete_MemoryLevel MemoryACPILevel; 281 SMU74_Discrete_MemoryLevel MemoryACPILevel;
@@ -347,6 +364,8 @@ struct SMU74_Discrete_DpmTable {
347 364
348 uint32_t CurrSclkPllRange; 365 uint32_t CurrSclkPllRange;
349 sclkFcwRange_t SclkFcwRangeTable[NUM_SCLK_RANGE]; 366 sclkFcwRange_t SclkFcwRangeTable[NUM_SCLK_RANGE];
367 GB_VDROOP_TABLE_t BTCGB_VDROOP_TABLE[BTCGB_VDROOP_TABLE_MAX_ENTRIES];
368 SMU_QuadraticCoeffs AVFSGB_VDROOP_TABLE[AVFSGB_VDROOP_TABLE_MAX_ENTRIES];
350}; 369};
351 370
352typedef struct SMU74_Discrete_DpmTable SMU74_Discrete_DpmTable; 371typedef struct SMU74_Discrete_DpmTable SMU74_Discrete_DpmTable;
@@ -550,16 +569,6 @@ struct SMU7_AcpiScoreboard {
550 569
551typedef struct SMU7_AcpiScoreboard SMU7_AcpiScoreboard; 570typedef struct SMU7_AcpiScoreboard SMU7_AcpiScoreboard;
552 571
553struct SMU_QuadraticCoeffs {
554 int32_t m1;
555 uint32_t b;
556
557 int16_t m2;
558 uint8_t m1_shift;
559 uint8_t m2_shift;
560};
561typedef struct SMU_QuadraticCoeffs SMU_QuadraticCoeffs;
562
563struct SMU74_Discrete_PmFuses { 572struct SMU74_Discrete_PmFuses {
564 uint8_t BapmVddCVidHiSidd[8]; 573 uint8_t BapmVddCVidHiSidd[8];
565 uint8_t BapmVddCVidLoSidd[8]; 574 uint8_t BapmVddCVidLoSidd[8];
@@ -821,6 +830,17 @@ typedef struct SMU7_GfxCuPgScoreboard SMU7_GfxCuPgScoreboard;
821#define DB_PCC_SHIFT 26 830#define DB_PCC_SHIFT 26
822#define DB_EDC_SHIFT 27 831#define DB_EDC_SHIFT 27
823 832
833#define BTCGB0_Vdroop_Enable_MASK 0x1
834#define BTCGB1_Vdroop_Enable_MASK 0x2
835#define AVFSGB0_Vdroop_Enable_MASK 0x4
836#define AVFSGB1_Vdroop_Enable_MASK 0x8
837
838#define BTCGB0_Vdroop_Enable_SHIFT 0
839#define BTCGB1_Vdroop_Enable_SHIFT 1
840#define AVFSGB0_Vdroop_Enable_SHIFT 2
841#define AVFSGB1_Vdroop_Enable_SHIFT 3
842
843
824#pragma pack(pop) 844#pragma pack(pop)
825 845
826 846
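
Likewise, in the DPM table the old Reserved[4] (16 bytes) becomes 12 bytes of named fields plus one reserved word, keeping everything after it in place: 1 + 3 + 2 + 2 + 2 + 2 + 4 = 16. The new Vdroop enable masks suggest a packed enable word; the consumer is not shown in this section, so the decode below is a plausible reading rather than confirmed usage:

#include <stdint.h>
#include <stdio.h>

#define BTCGB0_Vdroop_Enable_MASK	0x1
#define BTCGB1_Vdroop_Enable_MASK	0x2
#define AVFSGB0_Vdroop_Enable_MASK	0x4
#define AVFSGB1_Vdroop_Enable_MASK	0x8

int main(void)
{
	/* e.g. a setting with BTCGB0 and AVFSGB1 enabled */
	uint32_t vdroop = BTCGB0_Vdroop_Enable_MASK | AVFSGB1_Vdroop_Enable_MASK;

	printf("BTCGB0:  %d\n", !!(vdroop & BTCGB0_Vdroop_Enable_MASK));
	printf("AVFSGB0: %d\n", !!(vdroop & AVFSGB0_Vdroop_Enable_MASK));
	return 0;
}
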
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index 673a75c74e18..8e52a2e82db5 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -1006,10 +1006,16 @@ static int fiji_smu_init(struct pp_smumgr *smumgr)
1006 1006
1007static int fiji_smu_fini(struct pp_smumgr *smumgr) 1007static int fiji_smu_fini(struct pp_smumgr *smumgr)
1008{ 1008{
1009 struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
1010
1011 smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle);
1012
1009 if (smumgr->backend) { 1013 if (smumgr->backend) {
1010 kfree(smumgr->backend); 1014 kfree(smumgr->backend);
1011 smumgr->backend = NULL; 1015 smumgr->backend = NULL;
1012 } 1016 }
1017
1018 cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
1013 return 0; 1019 return 0;
1014} 1020}
1015 1021
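
One caution on fiji_smu_fini(): the added lines dereference smumgr->backend through priv before the existing NULL check runs, so a fini on a failed init could fault. A defensive variant keeps the buffer release inside the guard; stubbed types, sketch only:

#include <stdlib.h>

struct backend { void *header_buffer; };
struct mgr { struct backend *backend; };

static void free_buf(void *h) { free(h); }

/* Defensive variant: only touch the backend when it exists. The hunk
 * above dereferences it unconditionally before the NULL check. */
static void smu_fini(struct mgr *m)
{
	if (m->backend) {
		free_buf(m->backend->header_buffer);
		free(m->backend);
		m->backend = NULL;
	}
}

int main(void)
{
	struct mgr m = { NULL };

	smu_fini(&m);	/* safe even when init never ran */
	return 0;
}
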
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index de618ead9db8..5dba7c509710 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -52,19 +52,18 @@
52static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = { 52static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = {
53 /* Min pcie DeepSleep Activity CgSpll CgSpll CcPwr CcPwr Sclk Enabled Enabled Voltage Power */ 53 /* Min pcie DeepSleep Activity CgSpll CgSpll CcPwr CcPwr Sclk Enabled Enabled Voltage Power */
54 /* Voltage, DpmLevel, DivId, Level, FuncCntl3, FuncCntl4, DynRm, DynRm1 Did, Padding,ForActivity, ForThrottle, UpHyst, DownHyst, DownHyst, Throttle */ 54 /* Voltage, DpmLevel, DivId, Level, FuncCntl3, FuncCntl4, DynRm, DynRm1 Did, Padding,ForActivity, ForThrottle, UpHyst, DownHyst, DownHyst, Throttle */
55 { 0x3c0fd047, 0x00, 0x03, 0x1e00, 0x00200410, 0x87020000, 0, 0, 0x16, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x30750000, 0, 0, 0, 0, 0, 0, 0 } }, 55 { 0x100ea446, 0x00, 0x03, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x30750000, 0x3000, 0, 0x2600, 0, 0, 0x0004, 0x8f02, 0xffff, 0x2f00, 0x300e, 0x2700 } },
56 { 0xa00fd047, 0x01, 0x04, 0x1e00, 0x00800510, 0x87020000, 0, 0, 0x16, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x409c0000, 0, 0, 0, 0, 0, 0, 0 } }, 56 { 0x400ea446, 0x01, 0x04, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x409c0000, 0x2000, 0, 0x1e00, 1, 1, 0x0004, 0x8300, 0xffff, 0x1f00, 0xcb5e, 0x1a00 } },
57 { 0x0410d047, 0x01, 0x00, 0x1e00, 0x00600410, 0x87020000, 0, 0, 0x0e, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x50c30000, 0, 0, 0, 0, 0, 0, 0 } }, 57 { 0x740ea446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x50c30000, 0x2800, 0, 0x2000, 1, 1, 0x0004, 0x0c02, 0xffff, 0x2700, 0x6433, 0x2100 } },
58 { 0x6810d047, 0x01, 0x00, 0x1e00, 0x00800410, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x60ea0000, 0, 0, 0, 0, 0, 0, 0 } }, 58 { 0xa40ea446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x60ea0000, 0x3000, 0, 0x2600, 1, 1, 0x0004, 0x8f02, 0xffff, 0x2f00, 0x300e, 0x2700 } },
59 { 0xcc10d047, 0x01, 0x00, 0x1e00, 0x00e00410, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0xe8fd0000, 0, 0, 0, 0, 0, 0, 0 } }, 59 { 0xd80ea446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x70110100, 0x3800, 0, 0x2c00, 1, 1, 0x0004, 0x1203, 0xffff, 0x3600, 0xc9e2, 0x2e00 } },
60 { 0x3011d047, 0x01, 0x00, 0x1e00, 0x00400510, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x70110100, 0, 0, 0, 0, 0, 0, 0 } }, 60 { 0x3c0fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x80380100, 0x2000, 0, 0x1e00, 2, 1, 0x0004, 0x8300, 0xffff, 0x1f00, 0xcb5e, 0x1a00 } },
61 { 0x9411d047, 0x01, 0x00, 0x1e00, 0x00a00510, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0xf8240100, 0, 0, 0, 0, 0, 0, 0 } }, 61 { 0x6c0fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x905f0100, 0x2400, 0, 0x1e00, 2, 1, 0x0004, 0x8901, 0xffff, 0x2300, 0x314c, 0x1d00 } },
62 { 0xf811d047, 0x01, 0x00, 0x1e00, 0x00000610, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x80380100, 0, 0, 0, 0, 0, 0, 0 } } 62 { 0xa00fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0xa0860100, 0x2800, 0, 0x2000, 2, 1, 0x0004, 0x0c02, 0xffff, 0x2700, 0x6433, 0x2100 } }
63}; 63};
64 64
65static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 = 65static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 =
66 {0x50140000, 0x50140000, 0x00320000, 0x00, 0x00, 66 {0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00};
67 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x0000, 0x00, 0x00};
68 67
69/** 68/**
70* Set the address for reading/writing the SMC SRAM space. 69* Set the address for reading/writing the SMC SRAM space.
@@ -219,6 +218,18 @@ bool polaris10_is_smc_ram_running(struct pp_smumgr *smumgr)
219 && (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C))); 218 && (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C)));
220} 219}
221 220
221static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr)
222{
223 uint32_t efuse;
224
225 efuse = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4));
226 efuse &= 0x00000001;
227 if (efuse)
228 return true;
229
230 return false;
231}
232
222/** 233/**
223* Send a message to the SMC, and wait for its response. 234* Send a message to the SMC, and wait for its response.
224* 235*
@@ -228,21 +239,27 @@ bool polaris10_is_smc_ram_running(struct pp_smumgr *smumgr)
228*/ 239*/
229int polaris10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) 240int polaris10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
230{ 241{
242 int ret;
243
231 if (!polaris10_is_smc_ram_running(smumgr)) 244 if (!polaris10_is_smc_ram_running(smumgr))
232 return -1; 245 return -1;
233 246
247
234 SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); 248 SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
235 249
236 if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) 250 ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
237 printk("Failed to send Previous Message.\n");
238 251
252 if (ret != 1)
253 printk("\n failed to send pre message %x ret is %d \n", msg, ret);
239 254
240 cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); 255 cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
241 256
242 SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); 257 SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
243 258
244 if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) 259 ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
245 printk("Failed to send Message.\n"); 260
261 if (ret != 1)
262 printk("\n failed to send message %x ret is %d \n", msg, ret);
246 263
247 return 0; 264 return 0;
248} 265}
@@ -469,6 +486,7 @@ int polaris10_smu_fini(struct pp_smumgr *smumgr)
469 kfree(smumgr->backend); 486 kfree(smumgr->backend);
470 smumgr->backend = NULL; 487 smumgr->backend = NULL;
471 } 488 }
489 cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
472 return 0; 490 return 0;
473} 491}
474 492
@@ -952,6 +970,11 @@ static int polaris10_smu_init(struct pp_smumgr *smumgr)
952 (cgs_handle_t)smu_data->smu_buffer.handle); 970 (cgs_handle_t)smu_data->smu_buffer.handle);
953 return -1;); 971 return -1;);
954 972
973 if (polaris10_is_hw_avfs_present(smumgr))
974 smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT;
975 else
976 smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED;
977
955 return 0; 978 return 0;
956} 979}
957 980
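
polaris10_is_hw_avfs_present() reads the 49th efuse dword and tests bit 0; the if/return-true/return-false tail is equivalent to returning the masked comparison directly. Sketch with a stub register read in place of cgs_read_ind_register():

#include <stdbool.h>
#include <stdint.h>

/* Stub: the real code reads ixSMU_EFUSE_0 + (49 * 4) through
 * cgs_read_ind_register(). */
static uint32_t read_efuse_word(unsigned index) { return index == 49; }

/* Equivalent, more direct form of the efuse bit test. */
static bool hw_avfs_present(void)
{
	return (read_efuse_word(49) & 0x1) != 0;
}
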
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index c483baf6b4fb..0728c1e3d97a 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -81,6 +81,7 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
81 81
82int smum_fini(struct pp_smumgr *smumgr) 82int smum_fini(struct pp_smumgr *smumgr)
83{ 83{
84 kfree(smumgr->device);
84 kfree(smumgr); 85 kfree(smumgr);
85 return 0; 86 return 0;
86} 87}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index 32820b680d88..b22722eabafc 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -328,10 +328,17 @@ int tonga_write_smc_sram_dword(struct pp_smumgr *smumgr,
328 328
329static int tonga_smu_fini(struct pp_smumgr *smumgr) 329static int tonga_smu_fini(struct pp_smumgr *smumgr)
330{ 330{
331 struct tonga_smumgr *priv = (struct tonga_smumgr *)(smumgr->backend);
332
333 smu_free_memory(smumgr->device, (void *)priv->smu_buffer.handle);
334 smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle);
335
331 if (smumgr->backend != NULL) { 336 if (smumgr->backend != NULL) {
332 kfree(smumgr->backend); 337 kfree(smumgr->backend);
333 smumgr->backend = NULL; 338 smumgr->backend = NULL;
334 } 339 }
340
341 cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
335 return 0; 342 return 0;
336} 343}
337 344
diff --git a/drivers/gpu/drm/arc/Makefile b/drivers/gpu/drm/arc/Makefile
index d48fda70f857..73de56a0139a 100644
--- a/drivers/gpu/drm/arc/Makefile
+++ b/drivers/gpu/drm/arc/Makefile
@@ -1,2 +1,2 @@
1arcpgu-y := arcpgu_crtc.o arcpgu_hdmi.o arcpgu_drv.o 1arcpgu-y := arcpgu_crtc.o arcpgu_hdmi.o arcpgu_sim.o arcpgu_drv.o
2obj-$(CONFIG_DRM_ARCPGU) += arcpgu.o 2obj-$(CONFIG_DRM_ARCPGU) += arcpgu.o
diff --git a/drivers/gpu/drm/arc/arcpgu.h b/drivers/gpu/drm/arc/arcpgu.h
index 86574b698a78..e8fcf3ab1d9a 100644
--- a/drivers/gpu/drm/arc/arcpgu.h
+++ b/drivers/gpu/drm/arc/arcpgu.h
@@ -22,7 +22,6 @@ struct arcpgu_drm_private {
22 struct clk *clk; 22 struct clk *clk;
23 struct drm_fbdev_cma *fbdev; 23 struct drm_fbdev_cma *fbdev;
24 struct drm_framebuffer *fb; 24 struct drm_framebuffer *fb;
25 struct list_head event_list;
26 struct drm_crtc crtc; 25 struct drm_crtc crtc;
27 struct drm_plane *plane; 26 struct drm_plane *plane;
28}; 27};
@@ -43,6 +42,7 @@ static inline u32 arc_pgu_read(struct arcpgu_drm_private *arcpgu,
43 42
44int arc_pgu_setup_crtc(struct drm_device *dev); 43int arc_pgu_setup_crtc(struct drm_device *dev);
45int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np); 44int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np);
45int arcpgu_drm_sim_init(struct drm_device *drm, struct device_node *np);
46struct drm_fbdev_cma *arcpgu_fbdev_cma_init(struct drm_device *dev, 46struct drm_fbdev_cma *arcpgu_fbdev_cma_init(struct drm_device *dev,
47 unsigned int preferred_bpp, unsigned int num_crtc, 47 unsigned int preferred_bpp, unsigned int num_crtc,
48 unsigned int max_conn_count); 48 unsigned int max_conn_count);
diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c
index 92f8beff8e60..ee0a61c2861b 100644
--- a/drivers/gpu/drm/arc/arcpgu_crtc.c
+++ b/drivers/gpu/drm/arc/arcpgu_crtc.c
@@ -145,20 +145,14 @@ static int arc_pgu_crtc_atomic_check(struct drm_crtc *crtc,
145static void arc_pgu_crtc_atomic_begin(struct drm_crtc *crtc, 145static void arc_pgu_crtc_atomic_begin(struct drm_crtc *crtc,
146 struct drm_crtc_state *state) 146 struct drm_crtc_state *state)
147{ 147{
148 struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc); 148 struct drm_pending_vblank_event *event = crtc->state->event;
149 unsigned long flags;
150
151 if (crtc->state->event) {
152 struct drm_pending_vblank_event *event = crtc->state->event;
153 149
150 if (event) {
154 crtc->state->event = NULL; 151 crtc->state->event = NULL;
155 event->pipe = drm_crtc_index(crtc);
156
157 WARN_ON(drm_crtc_vblank_get(crtc) != 0);
158 152
159 spin_lock_irqsave(&crtc->dev->event_lock, flags); 153 spin_lock_irq(&crtc->dev->event_lock);
160 list_add_tail(&event->base.link, &arcpgu->event_list); 154 drm_crtc_send_vblank_event(crtc, event);
161 spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 155 spin_unlock_irq(&crtc->dev->event_lock);
162 } 156 }
163} 157}
164 158
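
The arcpgu atomic_begin rewrite drops the driver-private event list (and with it the preclose handler further down) and completes the pageflip event immediately with drm_crtc_send_vblank_event() under the device event lock. The plain spin_lock_irq() variant suffices because the atomic-commit path runs in process context with interrupts enabled. The shape of the new hook, with locking and event delivery stubbed out:

#include <stddef.h>

/* Stubbed locking and event plumbing; sketch of the pattern only. */
struct event { int pending; };
struct crtc_state { struct event *event; };

static void lock_irq(void) {}
static void unlock_irq(void) {}
static void send_vblank_event(struct event *e) { e->pending = 0; }

/* Consume state->event exactly once and complete it under the event
 * lock; no driver-side bookkeeping or cleanup path is left behind. */
static void atomic_begin(struct crtc_state *state)
{
	struct event *event = state->event;

	if (event) {
		state->event = NULL;
		lock_irq();
		send_vblank_event(event);
		unlock_irq();
	}
}
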
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
index 76e187a5bde0..ccbdadb108dc 100644
--- a/drivers/gpu/drm/arc/arcpgu_drv.c
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -32,17 +32,11 @@ static void arcpgu_fb_output_poll_changed(struct drm_device *dev)
32 drm_fbdev_cma_hotplug_event(arcpgu->fbdev); 32 drm_fbdev_cma_hotplug_event(arcpgu->fbdev);
33} 33}
34 34
35static int arcpgu_atomic_commit(struct drm_device *dev,
36 struct drm_atomic_state *state, bool async)
37{
38 return drm_atomic_helper_commit(dev, state, false);
39}
40
41static struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = { 35static struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = {
42 .fb_create = drm_fb_cma_create, 36 .fb_create = drm_fb_cma_create,
43 .output_poll_changed = arcpgu_fb_output_poll_changed, 37 .output_poll_changed = arcpgu_fb_output_poll_changed,
44 .atomic_check = drm_atomic_helper_check, 38 .atomic_check = drm_atomic_helper_check,
45 .atomic_commit = arcpgu_atomic_commit, 39 .atomic_commit = drm_atomic_helper_commit,
46}; 40};
47 41
48static void arcpgu_setup_mode_config(struct drm_device *drm) 42static void arcpgu_setup_mode_config(struct drm_device *drm)
@@ -81,22 +75,6 @@ static const struct file_operations arcpgu_drm_ops = {
81 .mmap = arcpgu_gem_mmap, 75 .mmap = arcpgu_gem_mmap,
82}; 76};
83 77
84static void arcpgu_preclose(struct drm_device *drm, struct drm_file *file)
85{
86 struct arcpgu_drm_private *arcpgu = drm->dev_private;
87 struct drm_pending_vblank_event *e, *t;
88 unsigned long flags;
89
90 spin_lock_irqsave(&drm->event_lock, flags);
91 list_for_each_entry_safe(e, t, &arcpgu->event_list, base.link) {
92 if (e->base.file_priv != file)
93 continue;
94 list_del(&e->base.link);
95 e->base.destroy(&e->base);
96 }
97 spin_unlock_irqrestore(&drm->event_lock, flags);
98}
99
100static void arcpgu_lastclose(struct drm_device *drm) 78static void arcpgu_lastclose(struct drm_device *drm)
101{ 79{
102 struct arcpgu_drm_private *arcpgu = drm->dev_private; 80 struct arcpgu_drm_private *arcpgu = drm->dev_private;
@@ -122,8 +100,6 @@ static int arcpgu_load(struct drm_device *drm)
122 if (IS_ERR(arcpgu->clk)) 100 if (IS_ERR(arcpgu->clk))
123 return PTR_ERR(arcpgu->clk); 101 return PTR_ERR(arcpgu->clk);
124 102
125 INIT_LIST_HEAD(&arcpgu->event_list);
126
127 arcpgu_setup_mode_config(drm); 103 arcpgu_setup_mode_config(drm);
128 104
129 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 105 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -149,15 +125,16 @@ static int arcpgu_load(struct drm_device *drm)
149 125
150 /* find the encoder node and initialize it */ 126 /* find the encoder node and initialize it */
151 encoder_node = of_parse_phandle(drm->dev->of_node, "encoder-slave", 0); 127 encoder_node = of_parse_phandle(drm->dev->of_node, "encoder-slave", 0);
152 if (!encoder_node) { 128 if (encoder_node) {
153 dev_err(drm->dev, "failed to get an encoder slave node\n"); 129 ret = arcpgu_drm_hdmi_init(drm, encoder_node);
154 return -ENODEV; 130 if (ret < 0)
131 return ret;
132 } else {
133 ret = arcpgu_drm_sim_init(drm, 0);
134 if (ret < 0)
135 return ret;
155 } 136 }
156 137
157 ret = arcpgu_drm_hdmi_init(drm, encoder_node);
158 if (ret < 0)
159 return ret;
160
161 drm_mode_config_reset(drm); 138 drm_mode_config_reset(drm);
162 drm_kms_helper_poll_init(drm); 139 drm_kms_helper_poll_init(drm);
163 140
@@ -192,7 +169,6 @@ int arcpgu_unload(struct drm_device *drm)
192static struct drm_driver arcpgu_drm_driver = { 169static struct drm_driver arcpgu_drm_driver = {
193 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | 170 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
194 DRIVER_ATOMIC, 171 DRIVER_ATOMIC,
195 .preclose = arcpgu_preclose,
196 .lastclose = arcpgu_lastclose, 172 .lastclose = arcpgu_lastclose,
197 .name = "drm-arcpgu", 173 .name = "drm-arcpgu",
198 .desc = "ARC PGU Controller", 174 .desc = "ARC PGU Controller",
@@ -207,7 +183,7 @@ static struct drm_driver arcpgu_drm_driver = {
207 .get_vblank_counter = drm_vblank_no_hw_counter, 183 .get_vblank_counter = drm_vblank_no_hw_counter,
208 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 184 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
209 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 185 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
210 .gem_free_object = drm_gem_cma_free_object, 186 .gem_free_object_unlocked = drm_gem_cma_free_object,
211 .gem_vm_ops = &drm_gem_cma_vm_ops, 187 .gem_vm_ops = &drm_gem_cma_vm_ops,
212 .gem_prime_export = drm_gem_prime_export, 188 .gem_prime_export = drm_gem_prime_export,
213 .gem_prime_import = drm_gem_prime_import, 189 .gem_prime_import = drm_gem_prime_import,
@@ -235,15 +211,8 @@ static int arcpgu_probe(struct platform_device *pdev)
235 if (ret) 211 if (ret)
236 goto err_unload; 212 goto err_unload;
237 213
238 ret = drm_connector_register_all(drm);
239 if (ret)
240 goto err_unregister;
241
242 return 0; 214 return 0;
243 215
244err_unregister:
245 drm_dev_unregister(drm);
246
247err_unload: 216err_unload:
248 arcpgu_unload(drm); 217 arcpgu_unload(drm);
249 218
@@ -257,7 +226,6 @@ static int arcpgu_remove(struct platform_device *pdev)
257{ 226{
258 struct drm_device *drm = platform_get_drvdata(pdev); 227 struct drm_device *drm = platform_get_drvdata(pdev);
259 228
260 drm_connector_unregister_all(drm);
261 drm_dev_unregister(drm); 229 drm_dev_unregister(drm);
262 arcpgu_unload(drm); 230 arcpgu_unload(drm);
263 drm_dev_unref(drm); 231 drm_dev_unref(drm);
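
arcpgu_load() now treats the encoder-slave DT node as optional: present means HDMI init, absent means the new simulation connector (note the fallback passes a literal 0 where a NULL device_node pointer is meant; NULL would be the more idiomatic spelling). The explicit drm_connector_register_all()/unregister_all() calls also go away, presumably because connector registration is handled by the DRM core. A stubbed sketch of the fallback:

#include <stdio.h>

/* Stubbed probe helpers illustrating the optional-encoder fallback. */
static int hdmi_init(void *node) { (void)node; return 0; }
static int sim_init(void) { return 0; }
static void *find_encoder_node(void) { return NULL; }	/* none in DT */

static int load(void)
{
	void *node = find_encoder_node();

	/* With an encoder-slave node, drive HDMI; without one, fall
	 * back to the virtual (simulation) connector. */
	return node ? hdmi_init(node) : sim_init();
}

int main(void)
{
	printf("load=%d\n", load());
	return 0;
}
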
diff --git a/drivers/gpu/drm/arc/arcpgu_hdmi.c b/drivers/gpu/drm/arc/arcpgu_hdmi.c
index 08b6baeb320d..b7a8b2ac4055 100644
--- a/drivers/gpu/drm/arc/arcpgu_hdmi.c
+++ b/drivers/gpu/drm/arc/arcpgu_hdmi.c
@@ -46,23 +46,6 @@ static int arcpgu_drm_connector_get_modes(struct drm_connector *connector)
46 return sfuncs->get_modes(&slave->base, connector); 46 return sfuncs->get_modes(&slave->base, connector);
47} 47}
48 48
49struct drm_encoder *
50arcpgu_drm_connector_best_encoder(struct drm_connector *connector)
51{
52 struct drm_encoder_slave *slave;
53 struct arcpgu_drm_connector *con =
54 container_of(connector, struct arcpgu_drm_connector, connector);
55
56 slave = con->encoder_slave;
57 if (slave == NULL) {
58 dev_err(connector->dev->dev,
59 "connector_best_encoder: cannot find slave encoder for connector\n");
60 return NULL;
61 }
62
63 return &slave->base;
64}
65
66static enum drm_connector_status 49static enum drm_connector_status
67arcpgu_drm_connector_detect(struct drm_connector *connector, bool force) 50arcpgu_drm_connector_detect(struct drm_connector *connector, bool force)
68{ 51{
@@ -97,7 +80,6 @@ static void arcpgu_drm_connector_destroy(struct drm_connector *connector)
97static const struct drm_connector_helper_funcs 80static const struct drm_connector_helper_funcs
98arcpgu_drm_connector_helper_funcs = { 81arcpgu_drm_connector_helper_funcs = {
99 .get_modes = arcpgu_drm_connector_get_modes, 82 .get_modes = arcpgu_drm_connector_get_modes,
100 .best_encoder = arcpgu_drm_connector_best_encoder,
101}; 83};
102 84
103static const struct drm_connector_funcs arcpgu_drm_connector_funcs = { 85static const struct drm_connector_funcs arcpgu_drm_connector_funcs = {
diff --git a/drivers/gpu/drm/arc/arcpgu_sim.c b/drivers/gpu/drm/arc/arcpgu_sim.c
new file mode 100644
index 000000000000..2bf06d71556a
--- /dev/null
+++ b/drivers/gpu/drm/arc/arcpgu_sim.c
@@ -0,0 +1,128 @@
1/*
2 * ARC PGU DRM driver.
3 *
4 * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16
17#include <drm/drm_crtc_helper.h>
18#include <drm/drm_encoder_slave.h>
19#include <drm/drm_atomic_helper.h>
20
21#include "arcpgu.h"
22
23#define XRES_DEF 640
24#define YRES_DEF 480
25
26#define XRES_MAX 8192
27#define YRES_MAX 8192
28
29
30struct arcpgu_drm_connector {
31 struct drm_connector connector;
32 struct drm_encoder_slave *encoder_slave;
33};
34
35static int arcpgu_drm_connector_get_modes(struct drm_connector *connector)
36{
37 int count;
38
39 count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);
40 drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);
41 return count;
42}
43
44static enum drm_connector_status
45arcpgu_drm_connector_detect(struct drm_connector *connector, bool force)
46{
47 return connector_status_connected;
48}
49
50static void arcpgu_drm_connector_destroy(struct drm_connector *connector)
51{
52 drm_connector_unregister(connector);
53 drm_connector_cleanup(connector);
54}
55
56static const struct drm_connector_helper_funcs
57arcpgu_drm_connector_helper_funcs = {
58 .get_modes = arcpgu_drm_connector_get_modes,
59};
60
61static const struct drm_connector_funcs arcpgu_drm_connector_funcs = {
62 .dpms = drm_helper_connector_dpms,
63 .reset = drm_atomic_helper_connector_reset,
64 .detect = arcpgu_drm_connector_detect,
65 .fill_modes = drm_helper_probe_single_connector_modes,
66 .destroy = arcpgu_drm_connector_destroy,
67 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
68 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
69};
70
71static struct drm_encoder_funcs arcpgu_drm_encoder_funcs = {
72 .destroy = drm_encoder_cleanup,
73};
74
75int arcpgu_drm_sim_init(struct drm_device *drm, struct device_node *np)
76{
77 struct arcpgu_drm_connector *arcpgu_connector;
78 struct drm_encoder_slave *encoder;
79 struct drm_connector *connector;
80 int ret;
81
82 encoder = devm_kzalloc(drm->dev, sizeof(*encoder), GFP_KERNEL);
83 if (encoder == NULL)
84 return -ENOMEM;
85
86 encoder->base.possible_crtcs = 1;
87 encoder->base.possible_clones = 0;
88
89 ret = drm_encoder_init(drm, &encoder->base, &arcpgu_drm_encoder_funcs,
90 DRM_MODE_ENCODER_VIRTUAL, NULL);
91 if (ret)
92 return ret;
93
94 arcpgu_connector = devm_kzalloc(drm->dev, sizeof(*arcpgu_connector),
95 GFP_KERNEL);
96 if (!arcpgu_connector) {
97 ret = -ENOMEM;
98 goto error_encoder_cleanup;
99 }
100
101 connector = &arcpgu_connector->connector;
102 drm_connector_helper_add(connector, &arcpgu_drm_connector_helper_funcs);
103
104 ret = drm_connector_init(drm, connector, &arcpgu_drm_connector_funcs,
105 DRM_MODE_CONNECTOR_VIRTUAL);
106 if (ret < 0) {
107 dev_err(drm->dev, "failed to initialize drm connector\n");
108 goto error_encoder_cleanup;
109 }
110
111 ret = drm_mode_connector_attach_encoder(connector, &encoder->base);
112 if (ret < 0) {
113 dev_err(drm->dev, "could not attach connector to encoder\n");
114 drm_connector_unregister(connector);
115 goto error_connector_cleanup;
116 }
117
118 arcpgu_connector->encoder_slave = encoder;
119
120 return 0;
121
122error_connector_cleanup:
123 drm_connector_cleanup(connector);
124
125error_encoder_cleanup:
126 drm_encoder_cleanup(&encoder->base);
127 return ret;
128}
diff --git a/drivers/gpu/drm/arm/Kconfig b/drivers/gpu/drm/arm/Kconfig
index eaed454e043c..1b2906568a48 100644
--- a/drivers/gpu/drm/arm/Kconfig
+++ b/drivers/gpu/drm/arm/Kconfig
@@ -25,3 +25,19 @@ config DRM_HDLCD_SHOW_UNDERRUN
25 Enable this option to show in red colour the pixels that the 25 Enable this option to show in red colour the pixels that the
26 HDLCD device did not fetch from framebuffer due to underrun 26 HDLCD device did not fetch from framebuffer due to underrun
27 conditions. 27 conditions.
28
29config DRM_MALI_DISPLAY
30 tristate "ARM Mali Display Processor"
31 depends on DRM && OF && (ARM || ARM64)
32 depends on COMMON_CLK
33 select DRM_ARM
34 select DRM_KMS_HELPER
35 select DRM_KMS_CMA_HELPER
36 select DRM_GEM_CMA_HELPER
37 select VIDEOMODE_HELPERS
38 help
39 Choose this option if you want to compile the ARM Mali Display
40 Processor driver. It supports the DP500, DP550 and DP650 variants
41 of the hardware.
42
43 If compiled as a module it will be called mali-dp.
diff --git a/drivers/gpu/drm/arm/Makefile b/drivers/gpu/drm/arm/Makefile
index 89dcb7bab93a..bb8b158ff90d 100644
--- a/drivers/gpu/drm/arm/Makefile
+++ b/drivers/gpu/drm/arm/Makefile
@@ -1,2 +1,4 @@
1hdlcd-y := hdlcd_drv.o hdlcd_crtc.o
2obj-$(CONFIG_DRM_HDLCD) += hdlcd.o
3mali-dp-y := malidp_drv.o malidp_hw.o malidp_planes.o malidp_crtc.o
4obj-$(CONFIG_DRM_MALI_DISPLAY) += mali-dp.o
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index fef1b04c2aab..48019ae22ddb 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -33,8 +33,17 @@
33 *
34 */
35
36static void hdlcd_crtc_cleanup(struct drm_crtc *crtc)
37{
38 struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
39
40 /* stop the controller on cleanup */
41 hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0);
42 drm_crtc_cleanup(crtc);
43}
44
36static const struct drm_crtc_funcs hdlcd_crtc_funcs = {
37	.destroy = drm_crtc_cleanup,
46	.destroy = hdlcd_crtc_cleanup,
38	.set_config = drm_atomic_helper_set_config,
39	.page_flip = drm_atomic_helper_page_flip,
40	.reset = drm_atomic_helper_crtc_reset,
@@ -97,7 +106,7 @@ static void hdlcd_crtc_mode_set_nofb(struct drm_crtc *crtc)
97	struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
98	struct drm_display_mode *m = &crtc->state->adjusted_mode;
99	struct videomode vm;
100	unsigned int polarities, line_length, err;
109	unsigned int polarities, err;
101
102	vm.vfront_porch = m->crtc_vsync_start - m->crtc_vdisplay;
103	vm.vback_porch = m->crtc_vtotal - m->crtc_vsync_end;
@@ -113,23 +122,18 @@ static void hdlcd_crtc_mode_set_nofb(struct drm_crtc *crtc)
113	if (m->flags & DRM_MODE_FLAG_PVSYNC)
114		polarities |= HDLCD_POLARITY_VSYNC;
115
116	line_length = crtc->primary->state->fb->pitches[0];
117
118	/* Allow max number of outstanding requests and largest burst size */
119	hdlcd_write(hdlcd, HDLCD_REG_BUS_OPTIONS,
120		    HDLCD_BUS_MAX_OUTSTAND | HDLCD_BUS_BURST_16);
121
122	hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, line_length);
123	hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_PITCH, line_length);
124	hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_COUNT, m->crtc_vdisplay - 1);
125	hdlcd_write(hdlcd, HDLCD_REG_V_DATA, m->crtc_vdisplay - 1);
126	hdlcd_write(hdlcd, HDLCD_REG_V_BACK_PORCH, vm.vback_porch - 1);
127	hdlcd_write(hdlcd, HDLCD_REG_V_FRONT_PORCH, vm.vfront_porch - 1);
128	hdlcd_write(hdlcd, HDLCD_REG_V_SYNC, vm.vsync_len - 1);
133	hdlcd_write(hdlcd, HDLCD_REG_H_DATA, m->crtc_hdisplay - 1);
129	hdlcd_write(hdlcd, HDLCD_REG_H_BACK_PORCH, vm.hback_porch - 1);
130	hdlcd_write(hdlcd, HDLCD_REG_H_FRONT_PORCH, vm.hfront_porch - 1);
131	hdlcd_write(hdlcd, HDLCD_REG_H_SYNC, vm.hsync_len - 1);
132	hdlcd_write(hdlcd, HDLCD_REG_H_DATA, m->crtc_hdisplay - 1);
133	hdlcd_write(hdlcd, HDLCD_REG_POLARITIES, polarities);
134
135	err = hdlcd_set_pxl_fmt(crtc);
@@ -144,20 +148,19 @@ static void hdlcd_crtc_enable(struct drm_crtc *crtc)
144	struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
145
146	clk_prepare_enable(hdlcd->clk);
151	hdlcd_crtc_mode_set_nofb(crtc);
147	hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 1);
148	drm_crtc_vblank_on(crtc);
149}
150
151static void hdlcd_crtc_disable(struct drm_crtc *crtc)
152{
153	struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
154
155	if (!crtc->primary->fb)
159	if (!crtc->state->active)
156		return;
157
158	clk_disable_unprepare(hdlcd->clk);
159	hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0);
160	drm_crtc_vblank_off(crtc);
163	clk_disable_unprepare(hdlcd->clk);
161}
162
163static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc,
@@ -179,52 +182,39 @@ static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc,
179static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc,
180				    struct drm_crtc_state *state)
181{
182	struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
185	struct drm_pending_vblank_event *event = crtc->state->event;
183	unsigned long flags;
184
185	if (crtc->state->event) {
186		struct drm_pending_vblank_event *event = crtc->state->event;
187
187	if (event) {
188		crtc->state->event = NULL;
189		event->pipe = drm_crtc_index(crtc);
190
191		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
192
193		spin_lock_irqsave(&crtc->dev->event_lock, flags);
194		list_add_tail(&event->base.link, &hdlcd->event_list);
195		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
190		spin_lock_irq(&crtc->dev->event_lock);
191		if (drm_crtc_vblank_get(crtc) == 0)
192			drm_crtc_arm_vblank_event(crtc, event);
193		else
194			drm_crtc_send_vblank_event(crtc, event);
195		spin_unlock_irq(&crtc->dev->event_lock);
196	}
197}
198
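
The rewritten atomic_begin drops the driver-private event list in favour of the core vblank helpers: when a vblank reference can be taken the event is armed and delivered from the next vblank interrupt, otherwise it is sent immediately. A minimal sketch of that decision with the DRM calls stubbed out (vblank_get/arm_event/send_event are stand-ins):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for drm_crtc_vblank_get()/arm/send; the real calls live in the DRM core. */
static bool vblank_interrupts_available = true;

static int vblank_get(void)  { return vblank_interrupts_available ? 0 : -22; }
static void arm_event(void)  { puts("event armed: delivered at next vblank IRQ"); }
static void send_event(void) { puts("event sent immediately: no vblank to wait for"); }

int main(void)
{
	/* Mirrors the decision in hdlcd_crtc_atomic_begin() above. */
	if (vblank_get() == 0)
		arm_event();   /* completion tied to the interrupt */
	else
		send_event();  /* e.g. CRTC is off; don't stall userspace */
	return 0;
}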
199static void hdlcd_crtc_atomic_flush(struct drm_crtc *crtc,
200 struct drm_crtc_state *state)
201{
202}
203
204static bool hdlcd_crtc_mode_fixup(struct drm_crtc *crtc,
205 const struct drm_display_mode *mode,
206 struct drm_display_mode *adjusted_mode)
207{
208 return true;
209}
210
211static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = {
212	.mode_fixup = hdlcd_crtc_mode_fixup,
213	.mode_set = drm_helper_crtc_mode_set,
214	.mode_set_base = drm_helper_crtc_mode_set_base,
215	.mode_set_nofb = hdlcd_crtc_mode_set_nofb,
216	.enable = hdlcd_crtc_enable,
217	.disable = hdlcd_crtc_disable,
218	.prepare = hdlcd_crtc_disable,
219	.commit = hdlcd_crtc_enable,
220	.atomic_check = hdlcd_crtc_atomic_check,
221	.atomic_begin = hdlcd_crtc_atomic_begin,
222	.atomic_flush = hdlcd_crtc_atomic_flush,
223};
224
225static int hdlcd_plane_atomic_check(struct drm_plane *plane,
226				    struct drm_plane_state *state)
227{
209 u32 src_w, src_h;
210
211 src_w = state->src_w >> 16;
212 src_h = state->src_h >> 16;
213
214 /* we can't do any scaling of the plane source */
215 if ((src_w != state->crtc_w) || (src_h != state->crtc_h))
216 return -EINVAL;
217
228	return 0;
229}
230
@@ -233,20 +223,31 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane,
233{
234	struct hdlcd_drm_private *hdlcd;
235	struct drm_gem_cma_object *gem;
226	unsigned int depth, bpp;
227	u32 src_w, src_h, dest_w, dest_h;
236	dma_addr_t scanout_start;
237
238	if (!plane->state->crtc || !plane->state->fb)
230	if (!plane->state->fb)
239		return;
240
241	hdlcd = crtc_to_hdlcd_priv(plane->state->crtc);
233	drm_fb_get_bpp_depth(plane->state->fb->pixel_format, &depth, &bpp);
234 src_w = plane->state->src_w >> 16;
235 src_h = plane->state->src_h >> 16;
236 dest_w = plane->state->crtc_w;
237 dest_h = plane->state->crtc_h;
242	gem = drm_fb_cma_get_gem_obj(plane->state->fb, 0);
243	scanout_start = gem->paddr;
239	scanout_start = gem->paddr + plane->state->fb->offsets[0] +
240 plane->state->crtc_y * plane->state->fb->pitches[0] +
241 plane->state->crtc_x * bpp / 8;
242
243 hdlcd = plane->dev->dev_private;
244 hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, plane->state->fb->pitches[0]);
245 hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_PITCH, plane->state->fb->pitches[0]);
246 hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_COUNT, dest_h - 1);
244	hdlcd_write(hdlcd, HDLCD_REG_FB_BASE, scanout_start);
245}
246
247static const struct drm_plane_helper_funcs hdlcd_plane_helper_funcs = {
248	.prepare_fb = NULL,
249	.cleanup_fb = NULL,
250	.atomic_check = hdlcd_plane_atomic_check,
251	.atomic_update = hdlcd_plane_atomic_update,
252};
@@ -294,16 +295,6 @@ static struct drm_plane *hdlcd_plane_init(struct drm_device *drm)
294	return plane;
295}
296
297void hdlcd_crtc_suspend(struct drm_crtc *crtc)
298{
299 hdlcd_crtc_disable(crtc);
300}
301
302void hdlcd_crtc_resume(struct drm_crtc *crtc)
303{
304 hdlcd_crtc_enable(crtc);
305}
306
307int hdlcd_setup_crtc(struct drm_device *drm)
308{
309	struct hdlcd_drm_private *hdlcd = drm->dev_private;
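
The updated hdlcd_plane_atomic_update() above derives the scanout address from the framebuffer offset, the line pitch, the bytes per pixel and the plane position. A worked standalone example of the same arithmetic (all values are made up for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical 1920x1080 XRGB8888 framebuffer. */
	uint64_t paddr  = 0x80000000;      /* CMA buffer base (gem->paddr) */
	uint32_t offset = 0;               /* fb->offsets[0] */
	uint32_t pitch  = 1920 * 4;        /* fb->pitches[0], bytes per line */
	uint32_t bpp    = 32;              /* from drm_fb_get_bpp_depth() */
	uint32_t crtc_x = 16, crtc_y = 8;  /* plane position on the CRTC */

	/* Same formula as in hdlcd_plane_atomic_update() above. */
	uint64_t scanout = paddr + offset +
			   (uint64_t)crtc_y * pitch +
			   (uint64_t)crtc_x * bpp / 8;

	printf("scanout start: 0x%llx\n", (unsigned long long)scanout);
	return 0;
}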
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index b987c63ba8d6..74279be20b75 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -49,8 +49,6 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags)
49	atomic_set(&hdlcd->dma_end_count, 0);
50#endif
51
52 INIT_LIST_HEAD(&hdlcd->event_list);
53
54	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
55	hdlcd->mmio = devm_ioremap_resource(drm->dev, res);
56	if (IS_ERR(hdlcd->mmio)) {
@@ -84,11 +82,7 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags)
84		goto setup_fail;
85	}
86
87 pm_runtime_enable(drm->dev);
88
89 pm_runtime_get_sync(drm->dev);
90	ret = drm_irq_install(drm, platform_get_irq(pdev, 0));
91	pm_runtime_put_sync(drm->dev);
92	if (ret < 0) {
93		DRM_ERROR("failed to install IRQ handler\n");
94		goto irq_fail;
@@ -112,17 +106,11 @@ static void hdlcd_fb_output_poll_changed(struct drm_device *drm)
112	drm_fbdev_cma_hotplug_event(hdlcd->fbdev);
113}
114
115static int hdlcd_atomic_commit(struct drm_device *dev,
116 struct drm_atomic_state *state, bool nonblock)
117{
118 return drm_atomic_helper_commit(dev, state, false);
119}
120
121static const struct drm_mode_config_funcs hdlcd_mode_config_funcs = {
122	.fb_create = drm_fb_cma_create,
123	.output_poll_changed = hdlcd_fb_output_poll_changed,
124	.atomic_check = drm_atomic_helper_check,
125	.atomic_commit = hdlcd_atomic_commit,
113	.atomic_commit = drm_atomic_helper_commit,
126};
127
128static void hdlcd_setup_mode_config(struct drm_device *drm)
@@ -164,24 +152,9 @@ static irqreturn_t hdlcd_irq(int irq, void *arg)
164		atomic_inc(&hdlcd->vsync_count);
165
166#endif
167	if (irq_status & HDLCD_INTERRUPT_VSYNC) {
155	if (irq_status & HDLCD_INTERRUPT_VSYNC)
168 bool events_sent = false;
169 unsigned long flags;
170 struct drm_pending_vblank_event *e, *t;
171
172		drm_crtc_handle_vblank(&hdlcd->crtc);
173
174 spin_lock_irqsave(&drm->event_lock, flags);
175 list_for_each_entry_safe(e, t, &hdlcd->event_list, base.link) {
176 list_del(&e->base.link);
177 drm_crtc_send_vblank_event(&hdlcd->crtc, e);
178 events_sent = true;
179 }
180 if (events_sent)
181 drm_crtc_vblank_put(&hdlcd->crtc);
182 spin_unlock_irqrestore(&drm->event_lock, flags);
183 }
184
185	/* acknowledge interrupt(s) */
186	hdlcd_write(hdlcd, HDLCD_REG_INT_CLEAR, irq_status);
187
@@ -275,6 +248,7 @@ static int hdlcd_show_pxlclock(struct seq_file *m, void *arg)
275static struct drm_info_list hdlcd_debugfs_list[] = {
276	{ "interrupt_count", hdlcd_show_underrun_count, 0 },
277	{ "clocks", hdlcd_show_pxlclock, 0 },
251 { "fb", drm_fb_cma_debugfs_show, 0 },
278};
279
280static int hdlcd_debugfs_init(struct drm_minor *minor)
@@ -316,7 +290,7 @@ static struct drm_driver hdlcd_driver = {
316	.get_vblank_counter = drm_vblank_no_hw_counter,
317	.enable_vblank = hdlcd_enable_vblank,
318	.disable_vblank = hdlcd_disable_vblank,
319	.gem_free_object = drm_gem_cma_free_object,
293	.gem_free_object_unlocked = drm_gem_cma_free_object,
320	.gem_vm_ops = &drm_gem_cma_vm_ops,
321	.dumb_create = drm_gem_cma_dumb_create,
322	.dumb_map_offset = drm_gem_cma_dumb_map_offset,
@@ -357,6 +331,8 @@ static int hdlcd_drm_bind(struct device *dev)
357		return -ENOMEM;
358
359	drm->dev_private = hdlcd;
334 dev_set_drvdata(dev, drm);
335
360	hdlcd_setup_mode_config(drm);
361	ret = hdlcd_load(drm, 0);
362	if (ret)
@@ -366,14 +342,18 @@ static int hdlcd_drm_bind(struct device *dev)
366	if (ret)
367		goto err_unload;
368
369 dev_set_drvdata(dev, drm);
370
371	ret = component_bind_all(dev, drm);
372	if (ret) {
373		DRM_ERROR("Failed to bind all components\n");
374		goto err_unregister;
375	}
376
351 ret = pm_runtime_set_active(dev);
352 if (ret)
353 goto err_pm_active;
354
355 pm_runtime_enable(dev);
356
377	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
378	if (ret < 0) {
379		DRM_ERROR("failed to initialise vblank\n");
@@ -399,16 +379,16 @@ err_fbdev:
399	drm_mode_config_cleanup(drm);
400	drm_vblank_cleanup(drm);
401err_vblank:
382 pm_runtime_disable(drm->dev);
383err_pm_active:
402	component_unbind_all(dev, drm);
403err_unregister:
404	drm_dev_unregister(drm);
405err_unload:
406 pm_runtime_get_sync(drm->dev);
407	drm_irq_uninstall(drm);
408 pm_runtime_put_sync(drm->dev);
409 pm_runtime_disable(drm->dev);
410	of_reserved_mem_device_release(drm->dev);
411err_free:
391 dev_set_drvdata(dev, NULL);
412	drm_dev_unref(drm);
413
414	return ret;
@@ -495,30 +475,34 @@ MODULE_DEVICE_TABLE(of, hdlcd_of_match);
495static int __maybe_unused hdlcd_pm_suspend(struct device *dev)
496{
497	struct drm_device *drm = dev_get_drvdata(dev);
498	struct drm_crtc *crtc;
478	struct hdlcd_drm_private *hdlcd = drm ? drm->dev_private : NULL;
499
500	if (pm_runtime_suspended(dev))
480	if (!hdlcd)
501		return 0;
502
503	drm_modeset_lock_all(drm);
504	list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
505		hdlcd_crtc_suspend(crtc);
506	drm_modeset_unlock_all(drm);
483	drm_kms_helper_poll_disable(drm);
484
485	hdlcd->state = drm_atomic_helper_suspend(drm);
486	if (IS_ERR(hdlcd->state)) {
487		drm_kms_helper_poll_enable(drm);
488		return PTR_ERR(hdlcd->state);
489	}
490
507	return 0;
508}
509
510static int __maybe_unused hdlcd_pm_resume(struct device *dev)
511{
512	struct drm_device *drm = dev_get_drvdata(dev);
513	struct drm_crtc *crtc;
497	struct hdlcd_drm_private *hdlcd = drm ? drm->dev_private : NULL;
514
515	if (!pm_runtime_suspended(dev))
499	if (!hdlcd)
516		return 0;
517
518	drm_modeset_lock_all(drm);
519	list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
520		hdlcd_crtc_resume(crtc);
521	drm_modeset_unlock_all(drm);
502	drm_atomic_helper_resume(drm, hdlcd->state);
503	drm_kms_helper_poll_enable(drm);
504	pm_runtime_set_active(dev);
505
522	return 0;
523}
524
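
The PM hooks now rely on the atomic helper pair: drm_atomic_helper_suspend() shuts the outputs down and hands back a snapshot of the state, which drm_atomic_helper_resume() replays later; the driver only has to stash the pointer. A minimal sketch of that pairing, with the helpers reduced to stubs:

#include <stdio.h>

struct state { int dummy; };   /* stands in for struct drm_atomic_state */
static struct state saved;

static struct state *helper_suspend(void)  { puts("outputs off, state saved"); return &saved; }
static void helper_resume(struct state *s) { (void)s; puts("saved state replayed"); }

int main(void)
{
	/* suspend: keep the returned snapshot, as hdlcd does in hdlcd->state */
	struct state *snapshot = helper_suspend();

	/* resume: hand the same snapshot back */
	helper_resume(snapshot);
	return 0;
}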
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.h b/drivers/gpu/drm/arm/hdlcd_drv.h
index aa234784f053..e3950a071152 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.h
+++ b/drivers/gpu/drm/arm/hdlcd_drv.h
@@ -9,10 +9,9 @@ struct hdlcd_drm_private {
9	void __iomem *mmio;
10	struct clk *clk;
11	struct drm_fbdev_cma *fbdev;
12	struct drm_framebuffer *fb;
13	struct list_head event_list;
14	struct drm_crtc crtc;
15	struct drm_plane *plane;
14	struct drm_atomic_state *state;
16#ifdef CONFIG_DEBUG_FS
17	atomic_t buffer_underrun_count;
18	atomic_t bus_error_count;
@@ -36,7 +35,5 @@ static inline u32 hdlcd_read(struct hdlcd_drm_private *hdlcd, unsigned int reg)
36
37int hdlcd_setup_crtc(struct drm_device *dev);
38void hdlcd_set_scanout(struct hdlcd_drm_private *hdlcd);
39void hdlcd_crtc_suspend(struct drm_crtc *crtc);
40void hdlcd_crtc_resume(struct drm_crtc *crtc);
41
42#endif /* __HDLCD_DRV_H__ */
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
new file mode 100644
index 000000000000..08e6a71f5d05
--- /dev/null
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -0,0 +1,216 @@
1/*
2 * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
3 * Author: Liviu Dudau <Liviu.Dudau@arm.com>
4 *
5 * This program is free software and is provided to you under the terms of the
6 * GNU General Public License version 2 as published by the Free Software
7 * Foundation, and any use by you of this program is subject to the terms
8 * of such GNU licence.
9 *
10 * ARM Mali DP500/DP550/DP650 driver (crtc operations)
11 */
12
13#include <drm/drmP.h>
14#include <drm/drm_atomic.h>
15#include <drm/drm_atomic_helper.h>
16#include <drm/drm_crtc.h>
17#include <drm/drm_crtc_helper.h>
18#include <linux/clk.h>
19#include <video/videomode.h>
20
21#include "malidp_drv.h"
22#include "malidp_hw.h"
23
24static bool malidp_crtc_mode_fixup(struct drm_crtc *crtc,
25 const struct drm_display_mode *mode,
26 struct drm_display_mode *adjusted_mode)
27{
28 struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
29 struct malidp_hw_device *hwdev = malidp->dev;
30
31 /*
32 * check that the hardware can drive the required clock rate,
33 * but skip the check if the clock is meant to be disabled (req_rate = 0)
34 */
35 long rate, req_rate = mode->crtc_clock * 1000;
36
37 if (req_rate) {
38 rate = clk_round_rate(hwdev->mclk, req_rate);
39 if (rate < req_rate) {
40 DRM_DEBUG_DRIVER("mclk clock unable to reach %d kHz\n",
41 mode->crtc_clock);
42 return false;
43 }
44
45 rate = clk_round_rate(hwdev->pxlclk, req_rate);
46 if (rate != req_rate) {
47 DRM_DEBUG_DRIVER("pxlclk doesn't support %ld Hz\n",
48 req_rate);
49 return false;
50 }
51 }
52
53 return true;
54}
55
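
malidp_crtc_mode_fixup() above accepts a mode only if the clock tree can deliver it: mclk merely has to reach at least the requested rate, while pxlclk has to match it exactly. A standalone sketch of the exact-match check, with clk_round_rate() replaced by a toy stub that can only produce multiples of 25 MHz:

#include <stdio.h>

/* Stand-in for clk_round_rate(): returns the closest achievable rate. */
static long round_rate(long requested)
{
	return (requested / 25000000) * 25000000;
}

int main(void)
{
	long req = 148500000;   /* 148.5 MHz pixel clock for 1080p60 */
	long got = round_rate(req);

	/* pxlclk must hit the rate exactly, as in malidp_crtc_mode_fixup() */
	if (got != req)
		printf("reject mode: wanted %ld Hz, clock gives %ld Hz\n", req, got);
	else
		printf("mode accepted\n");
	return 0;
}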
56static void malidp_crtc_enable(struct drm_crtc *crtc)
57{
58 struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
59 struct malidp_hw_device *hwdev = malidp->dev;
60 struct videomode vm;
61
62 drm_display_mode_to_videomode(&crtc->state->adjusted_mode, &vm);
63
64 clk_prepare_enable(hwdev->pxlclk);
65
66 /* mclk needs to be set to the same or higher rate than pxlclk */
67 clk_set_rate(hwdev->mclk, crtc->state->adjusted_mode.crtc_clock * 1000);
68 clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000);
69
70 hwdev->modeset(hwdev, &vm);
71 hwdev->leave_config_mode(hwdev);
72 drm_crtc_vblank_on(crtc);
73}
74
75static void malidp_crtc_disable(struct drm_crtc *crtc)
76{
77 struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
78 struct malidp_hw_device *hwdev = malidp->dev;
79
80 drm_crtc_vblank_off(crtc);
81 hwdev->enter_config_mode(hwdev);
82 clk_disable_unprepare(hwdev->pxlclk);
83}
84
85static int malidp_crtc_atomic_check(struct drm_crtc *crtc,
86 struct drm_crtc_state *state)
87{
88 struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
89 struct malidp_hw_device *hwdev = malidp->dev;
90 struct drm_plane *plane;
91 const struct drm_plane_state *pstate;
92 u32 rot_mem_free, rot_mem_usable;
93 int rotated_planes = 0;
94
95 /*
96 * check if there is enough rotation memory available for planes
97 * that need 90° and 270° rotation. Each plane has set its required
98 * memory size in the ->plane_check() callback, here we only make
99	 * sure that the sums are less than the total usable memory.
100 *
101 * The rotation memory allocation algorithm (for each plane):
102	 * a. If no more rotated planes exist, all remaining rotation
103 * memory in the bank is available for use by the plane.
104 * b. If other rotated planes exist, and plane's layer ID is
105 * DE_VIDEO1, it can use all the memory from first bank if
106 * secondary rotation memory bank is available, otherwise it can
107 * use up to half the bank's memory.
108 * c. If other rotated planes exist, and plane's layer ID is not
109	 *    DE_VIDEO1, it can use half of the available memory.
110 *
111 * Note: this algorithm assumes that the order in which the planes are
112 * checked always has DE_VIDEO1 plane first in the list if it is
113 * rotated. Because that is how we create the planes in the first
114	 * place, under the current DRM version things work, but if ever the order
115 * in which drm_atomic_crtc_state_for_each_plane() iterates over planes
116 * changes, we need to pre-sort the planes before validation.
117 */
118
119 /* first count the number of rotated planes */
120 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
121 if (pstate->rotation & MALIDP_ROTATED_MASK)
122 rotated_planes++;
123 }
124
125 rot_mem_free = hwdev->rotation_memory[0];
126 /*
127 * if we have more than 1 plane using rotation memory, use the second
128 * block of rotation memory as well
129 */
130 if (rotated_planes > 1)
131 rot_mem_free += hwdev->rotation_memory[1];
132
133 /* now validate the rotation memory requirements */
134 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
135 struct malidp_plane *mp = to_malidp_plane(plane);
136 struct malidp_plane_state *ms = to_malidp_plane_state(pstate);
137
138 if (pstate->rotation & MALIDP_ROTATED_MASK) {
139 /* process current plane */
140 rotated_planes--;
141
142 if (!rotated_planes) {
143 /* no more rotated planes, we can use what's left */
144 rot_mem_usable = rot_mem_free;
145 } else {
146 if ((mp->layer->id != DE_VIDEO1) ||
147 (hwdev->rotation_memory[1] == 0))
148 rot_mem_usable = rot_mem_free / 2;
149 else
150 rot_mem_usable = hwdev->rotation_memory[0];
151 }
152
153 rot_mem_free -= rot_mem_usable;
154
155 if (ms->rotmem_size > rot_mem_usable)
156 return -EINVAL;
157 }
158 }
159
160 return 0;
161}
162
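
A worked example of the allocation rules described in the comment inside malidp_crtc_atomic_check() above, assuming a hypothetical device with two 64 KB rotation banks and two rotated planes, with DE_VIDEO1 checked first:

#include <stdio.h>

int main(void)
{
	/* Hypothetical DP550-style device: two 64 KB rotation banks. */
	unsigned int bank[2] = { 64 * 1024, 64 * 1024 };
	unsigned int rotated_planes = 2;    /* DE_VIDEO1 + one other layer */
	unsigned int rot_mem_free = bank[0] + (rotated_planes > 1 ? bank[1] : 0);

	/* DE_VIDEO1 is checked first and a second bank exists, so it may
	 * take all of bank 0 (rule b in the comment above). */
	unsigned int video1_share = bank[0];
	rot_mem_free -= video1_share;

	/* The remaining rotated plane is the last one, so it gets whatever
	 * is left (rule a). */
	unsigned int other_share = rot_mem_free;

	printf("DE_VIDEO1: %u bytes, other plane: %u bytes\n",
	       video1_share, other_share);
	return 0;
}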
163static const struct drm_crtc_helper_funcs malidp_crtc_helper_funcs = {
164 .mode_fixup = malidp_crtc_mode_fixup,
165 .enable = malidp_crtc_enable,
166 .disable = malidp_crtc_disable,
167 .atomic_check = malidp_crtc_atomic_check,
168};
169
170static const struct drm_crtc_funcs malidp_crtc_funcs = {
171 .destroy = drm_crtc_cleanup,
172 .set_config = drm_atomic_helper_set_config,
173 .page_flip = drm_atomic_helper_page_flip,
174 .reset = drm_atomic_helper_crtc_reset,
175 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
176 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
177};
178
179int malidp_crtc_init(struct drm_device *drm)
180{
181 struct malidp_drm *malidp = drm->dev_private;
182 struct drm_plane *primary = NULL, *plane;
183 int ret;
184
185 ret = malidp_de_planes_init(drm);
186 if (ret < 0) {
187 DRM_ERROR("Failed to initialise planes\n");
188 return ret;
189 }
190
191 drm_for_each_plane(plane, drm) {
192 if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
193 primary = plane;
194 break;
195 }
196 }
197
198 if (!primary) {
199 DRM_ERROR("no primary plane found\n");
200 ret = -EINVAL;
201 goto crtc_cleanup_planes;
202 }
203
204 ret = drm_crtc_init_with_planes(drm, &malidp->crtc, primary, NULL,
205 &malidp_crtc_funcs, NULL);
206
207 if (!ret) {
208 drm_crtc_helper_add(&malidp->crtc, &malidp_crtc_helper_funcs);
209 return 0;
210 }
211
212crtc_cleanup_planes:
213 malidp_de_planes_destroy(drm);
214
215 return ret;
216}
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
new file mode 100644
index 000000000000..e5b44e92f8cf
--- /dev/null
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -0,0 +1,512 @@
1/*
2 * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
3 * Author: Liviu Dudau <Liviu.Dudau@arm.com>
4 *
5 * This program is free software and is provided to you under the terms of the
6 * GNU General Public License version 2 as published by the Free Software
7 * Foundation, and any use by you of this program is subject to the terms
8 * of such GNU licence.
9 *
10 * ARM Mali DP500/DP550/DP650 KMS/DRM driver
11 */
12
13#include <linux/module.h>
14#include <linux/clk.h>
15#include <linux/component.h>
16#include <linux/of_device.h>
17#include <linux/of_graph.h>
18#include <linux/of_reserved_mem.h>
19
20#include <drm/drmP.h>
21#include <drm/drm_atomic.h>
22#include <drm/drm_atomic_helper.h>
23#include <drm/drm_crtc.h>
24#include <drm/drm_crtc_helper.h>
25#include <drm/drm_fb_helper.h>
26#include <drm/drm_fb_cma_helper.h>
27#include <drm/drm_gem_cma_helper.h>
28#include <drm/drm_of.h>
29
30#include "malidp_drv.h"
31#include "malidp_regs.h"
32#include "malidp_hw.h"
33
34#define MALIDP_CONF_VALID_TIMEOUT 250
35
36/*
37 * set the "config valid" bit and wait until the hardware acts on it
38 */
39static int malidp_set_and_wait_config_valid(struct drm_device *drm)
40{
41 struct malidp_drm *malidp = drm->dev_private;
42 struct malidp_hw_device *hwdev = malidp->dev;
43 int ret;
44
45 hwdev->set_config_valid(hwdev);
46	/* don't wait for the config_valid flag if we are in config mode */
47 if (hwdev->in_config_mode(hwdev))
48 return 0;
49
50 ret = wait_event_interruptible_timeout(malidp->wq,
51 atomic_read(&malidp->config_valid) == 1,
52 msecs_to_jiffies(MALIDP_CONF_VALID_TIMEOUT));
53
54 return (ret > 0) ? 0 : -ETIMEDOUT;
55}
56
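
The function above pairs a flag set from the interrupt handler with wait_event_interruptible_timeout(): the handler sets config_valid and wakes the queue, and the caller either observes the flag within the timeout or returns -ETIMEDOUT. A sequential sketch of that handshake (the IRQ handler is faked, so this only models the shape of the protocol):

#include <stdio.h>

static int config_valid;   /* set to 1 by the (faked) IRQ handler */

/* Stand-in for the DC interrupt handler that observes CONFIG_VALID. */
static void fake_irq_handler(void)
{
	config_valid = 1;   /* in the driver this is followed by wake_up(&wq) */
}

int main(void)
{
	config_valid = 0;   /* arm the handshake, as the driver does */
	/* ...hardware latches the new scene... */
	fake_irq_handler();

	/* wait_event_interruptible_timeout(wq, config_valid == 1, timeout)
	 * collapses to a simple test in this sequential sketch */
	printf(config_valid ? "config took effect\n" : "-ETIMEDOUT\n");
	return 0;
}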
57static void malidp_output_poll_changed(struct drm_device *drm)
58{
59 struct malidp_drm *malidp = drm->dev_private;
60
61 drm_fbdev_cma_hotplug_event(malidp->fbdev);
62}
63
64static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state)
65{
66 struct drm_pending_vblank_event *event;
67 struct drm_device *drm = state->dev;
68 struct malidp_drm *malidp = drm->dev_private;
69 int ret = malidp_set_and_wait_config_valid(drm);
70
71 if (ret)
72 DRM_DEBUG_DRIVER("timed out waiting for updated configuration\n");
73
74 event = malidp->crtc.state->event;
75 if (event) {
76 malidp->crtc.state->event = NULL;
77
78 spin_lock_irq(&drm->event_lock);
79 if (drm_crtc_vblank_get(&malidp->crtc) == 0)
80 drm_crtc_arm_vblank_event(&malidp->crtc, event);
81 else
82 drm_crtc_send_vblank_event(&malidp->crtc, event);
83 spin_unlock_irq(&drm->event_lock);
84 }
85 drm_atomic_helper_commit_hw_done(state);
86}
87
88static void malidp_atomic_commit_tail(struct drm_atomic_state *state)
89{
90 struct drm_device *drm = state->dev;
91
92 drm_atomic_helper_commit_modeset_disables(drm, state);
93 drm_atomic_helper_commit_modeset_enables(drm, state);
94 drm_atomic_helper_commit_planes(drm, state, true);
95
96 malidp_atomic_commit_hw_done(state);
97
98 drm_atomic_helper_wait_for_vblanks(drm, state);
99
100 drm_atomic_helper_cleanup_planes(drm, state);
101}
102
103static struct drm_mode_config_helper_funcs malidp_mode_config_helpers = {
104 .atomic_commit_tail = malidp_atomic_commit_tail,
105};
106
107static const struct drm_mode_config_funcs malidp_mode_config_funcs = {
108 .fb_create = drm_fb_cma_create,
109 .output_poll_changed = malidp_output_poll_changed,
110 .atomic_check = drm_atomic_helper_check,
111 .atomic_commit = drm_atomic_helper_commit,
112};
113
114static int malidp_enable_vblank(struct drm_device *drm, unsigned int crtc)
115{
116 struct malidp_drm *malidp = drm->dev_private;
117 struct malidp_hw_device *hwdev = malidp->dev;
118
119 malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK,
120 hwdev->map.de_irq_map.vsync_irq);
121 return 0;
122}
123
124static void malidp_disable_vblank(struct drm_device *drm, unsigned int pipe)
125{
126 struct malidp_drm *malidp = drm->dev_private;
127 struct malidp_hw_device *hwdev = malidp->dev;
128
129 malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK,
130 hwdev->map.de_irq_map.vsync_irq);
131}
132
133static int malidp_init(struct drm_device *drm)
134{
135 int ret;
136 struct malidp_drm *malidp = drm->dev_private;
137 struct malidp_hw_device *hwdev = malidp->dev;
138
139 drm_mode_config_init(drm);
140
141 drm->mode_config.min_width = hwdev->min_line_size;
142 drm->mode_config.min_height = hwdev->min_line_size;
143 drm->mode_config.max_width = hwdev->max_line_size;
144 drm->mode_config.max_height = hwdev->max_line_size;
145 drm->mode_config.funcs = &malidp_mode_config_funcs;
146 drm->mode_config.helper_private = &malidp_mode_config_helpers;
147
148 ret = malidp_crtc_init(drm);
149 if (ret) {
150 drm_mode_config_cleanup(drm);
151 return ret;
152 }
153
154 return 0;
155}
156
157static int malidp_irq_init(struct platform_device *pdev)
158{
159 int irq_de, irq_se, ret = 0;
160 struct drm_device *drm = dev_get_drvdata(&pdev->dev);
161
162 /* fetch the interrupts from DT */
163 irq_de = platform_get_irq_byname(pdev, "DE");
164 if (irq_de < 0) {
165 DRM_ERROR("no 'DE' IRQ specified!\n");
166 return irq_de;
167 }
168 irq_se = platform_get_irq_byname(pdev, "SE");
169 if (irq_se < 0) {
170 DRM_ERROR("no 'SE' IRQ specified!\n");
171 return irq_se;
172 }
173
174 ret = malidp_de_irq_init(drm, irq_de);
175 if (ret)
176 return ret;
177
178 ret = malidp_se_irq_init(drm, irq_se);
179 if (ret) {
180 malidp_de_irq_fini(drm);
181 return ret;
182 }
183
184 return 0;
185}
186
187static void malidp_lastclose(struct drm_device *drm)
188{
189 struct malidp_drm *malidp = drm->dev_private;
190
191 drm_fbdev_cma_restore_mode(malidp->fbdev);
192}
193
194static const struct file_operations fops = {
195 .owner = THIS_MODULE,
196 .open = drm_open,
197 .release = drm_release,
198 .unlocked_ioctl = drm_ioctl,
199#ifdef CONFIG_COMPAT
200 .compat_ioctl = drm_compat_ioctl,
201#endif
202 .poll = drm_poll,
203 .read = drm_read,
204 .llseek = noop_llseek,
205 .mmap = drm_gem_cma_mmap,
206};
207
208static struct drm_driver malidp_driver = {
209 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC |
210 DRIVER_PRIME,
211 .lastclose = malidp_lastclose,
212 .get_vblank_counter = drm_vblank_no_hw_counter,
213 .enable_vblank = malidp_enable_vblank,
214 .disable_vblank = malidp_disable_vblank,
215 .gem_free_object_unlocked = drm_gem_cma_free_object,
216 .gem_vm_ops = &drm_gem_cma_vm_ops,
217 .dumb_create = drm_gem_cma_dumb_create,
218 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
219 .dumb_destroy = drm_gem_dumb_destroy,
220 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
221 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
222 .gem_prime_export = drm_gem_prime_export,
223 .gem_prime_import = drm_gem_prime_import,
224 .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
225 .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
226 .gem_prime_vmap = drm_gem_cma_prime_vmap,
227 .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
228 .gem_prime_mmap = drm_gem_cma_prime_mmap,
229 .fops = &fops,
230 .name = "mali-dp",
231 .desc = "ARM Mali Display Processor driver",
232 .date = "20160106",
233 .major = 1,
234 .minor = 0,
235};
236
237static const struct of_device_id malidp_drm_of_match[] = {
238 {
239 .compatible = "arm,mali-dp500",
240 .data = &malidp_device[MALIDP_500]
241 },
242 {
243 .compatible = "arm,mali-dp550",
244 .data = &malidp_device[MALIDP_550]
245 },
246 {
247 .compatible = "arm,mali-dp650",
248 .data = &malidp_device[MALIDP_650]
249 },
250 {},
251};
252MODULE_DEVICE_TABLE(of, malidp_drm_of_match);
253
254#define MAX_OUTPUT_CHANNELS 3
255
256static int malidp_bind(struct device *dev)
257{
258 struct resource *res;
259 struct drm_device *drm;
260 struct malidp_drm *malidp;
261 struct malidp_hw_device *hwdev;
262 struct platform_device *pdev = to_platform_device(dev);
263 /* number of lines for the R, G and B output */
264 u8 output_width[MAX_OUTPUT_CHANNELS];
265 int ret = 0, i;
266 u32 version, out_depth = 0;
267
268 malidp = devm_kzalloc(dev, sizeof(*malidp), GFP_KERNEL);
269 if (!malidp)
270 return -ENOMEM;
271
272 hwdev = devm_kzalloc(dev, sizeof(*hwdev), GFP_KERNEL);
273 if (!hwdev)
274 return -ENOMEM;
275
276 /*
277 * copy the associated data from malidp_drm_of_match to avoid
278 * having to keep a reference to the OF node after binding
279 */
280 memcpy(hwdev, of_device_get_match_data(dev), sizeof(*hwdev));
281 malidp->dev = hwdev;
282
283 INIT_LIST_HEAD(&malidp->event_list);
284
285 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
286 hwdev->regs = devm_ioremap_resource(dev, res);
287 if (IS_ERR(hwdev->regs)) {
288 DRM_ERROR("Failed to map control registers area\n");
289 return PTR_ERR(hwdev->regs);
290 }
291
292 hwdev->pclk = devm_clk_get(dev, "pclk");
293 if (IS_ERR(hwdev->pclk))
294 return PTR_ERR(hwdev->pclk);
295
296 hwdev->aclk = devm_clk_get(dev, "aclk");
297 if (IS_ERR(hwdev->aclk))
298 return PTR_ERR(hwdev->aclk);
299
300 hwdev->mclk = devm_clk_get(dev, "mclk");
301 if (IS_ERR(hwdev->mclk))
302 return PTR_ERR(hwdev->mclk);
303
304 hwdev->pxlclk = devm_clk_get(dev, "pxlclk");
305 if (IS_ERR(hwdev->pxlclk))
306 return PTR_ERR(hwdev->pxlclk);
307
308 /* Get the optional framebuffer memory resource */
309 ret = of_reserved_mem_device_init(dev);
310 if (ret && ret != -ENODEV)
311 return ret;
312
313 drm = drm_dev_alloc(&malidp_driver, dev);
314 if (!drm) {
315 ret = -ENOMEM;
316 goto alloc_fail;
317 }
318
319 /* Enable APB clock in order to get access to the registers */
320 clk_prepare_enable(hwdev->pclk);
321 /*
322 * Enable AXI clock and main clock so that prefetch can start once
323 * the registers are set
324 */
325 clk_prepare_enable(hwdev->aclk);
326 clk_prepare_enable(hwdev->mclk);
327
328 ret = hwdev->query_hw(hwdev);
329 if (ret) {
330 DRM_ERROR("Invalid HW configuration\n");
331 goto query_hw_fail;
332 }
333
334 version = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_DE_CORE_ID);
335 DRM_INFO("found ARM Mali-DP%3x version r%dp%d\n", version >> 16,
336 (version >> 12) & 0xf, (version >> 8) & 0xf);
337
338 /* set the number of lines used for output of RGB data */
339 ret = of_property_read_u8_array(dev->of_node,
340 "arm,malidp-output-port-lines",
341 output_width, MAX_OUTPUT_CHANNELS);
342 if (ret)
343 goto query_hw_fail;
344
345 for (i = 0; i < MAX_OUTPUT_CHANNELS; i++)
346 out_depth = (out_depth << 8) | (output_width[i] & 0xf);
347 malidp_hw_write(hwdev, out_depth, hwdev->map.out_depth_base);
348
349 drm->dev_private = malidp;
350 dev_set_drvdata(dev, drm);
351 atomic_set(&malidp->config_valid, 0);
352 init_waitqueue_head(&malidp->wq);
353
354 ret = malidp_init(drm);
355 if (ret < 0)
356 goto init_fail;
357
358 ret = drm_dev_register(drm, 0);
359 if (ret)
360 goto register_fail;
361
362 /* Set the CRTC's port so that the encoder component can find it */
363 malidp->crtc.port = of_graph_get_next_endpoint(dev->of_node, NULL);
364
365 ret = component_bind_all(dev, drm);
366 of_node_put(malidp->crtc.port);
367
368 if (ret) {
369 DRM_ERROR("Failed to bind all components\n");
370 goto bind_fail;
371 }
372
373 ret = malidp_irq_init(pdev);
374 if (ret < 0)
375 goto irq_init_fail;
376
377 ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
378 if (ret < 0) {
379 DRM_ERROR("failed to initialise vblank\n");
380 goto vblank_fail;
381 }
382
383 drm_mode_config_reset(drm);
384
385 malidp->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
386 drm->mode_config.num_connector);
387
388 if (IS_ERR(malidp->fbdev)) {
389 ret = PTR_ERR(malidp->fbdev);
390 malidp->fbdev = NULL;
391 goto fbdev_fail;
392 }
393
394 drm_kms_helper_poll_init(drm);
395 return 0;
396
397fbdev_fail:
398 drm_vblank_cleanup(drm);
399vblank_fail:
400 malidp_se_irq_fini(drm);
401 malidp_de_irq_fini(drm);
402irq_init_fail:
403 component_unbind_all(dev, drm);
404bind_fail:
405 drm_dev_unregister(drm);
406register_fail:
407 malidp_de_planes_destroy(drm);
408 drm_mode_config_cleanup(drm);
409init_fail:
410 drm->dev_private = NULL;
411 dev_set_drvdata(dev, NULL);
412query_hw_fail:
413 clk_disable_unprepare(hwdev->mclk);
414 clk_disable_unprepare(hwdev->aclk);
415 clk_disable_unprepare(hwdev->pclk);
416 drm_dev_unref(drm);
417alloc_fail:
418 of_reserved_mem_device_release(dev);
419
420 return ret;
421}
422
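
malidp_bind() above packs the three per-channel output widths into a single register, one byte per channel with only the low nibble significant. A standalone check of the same loop:

#include <stdio.h>

int main(void)
{
	/* e.g. 8 bits per R, G and B line on the output port */
	unsigned char output_width[3] = { 8, 8, 8 };
	unsigned int out_depth = 0;
	int i;

	/* same packing loop as in malidp_bind() above */
	for (i = 0; i < 3; i++)
		out_depth = (out_depth << 8) | (output_width[i] & 0xf);

	printf("out_depth = 0x%06x\n", out_depth);   /* prints 0x080808 */
	return 0;
}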
423static void malidp_unbind(struct device *dev)
424{
425 struct drm_device *drm = dev_get_drvdata(dev);
426 struct malidp_drm *malidp = drm->dev_private;
427 struct malidp_hw_device *hwdev = malidp->dev;
428
429 if (malidp->fbdev) {
430 drm_fbdev_cma_fini(malidp->fbdev);
431 malidp->fbdev = NULL;
432 }
433 drm_kms_helper_poll_fini(drm);
434 malidp_se_irq_fini(drm);
435 malidp_de_irq_fini(drm);
436 drm_vblank_cleanup(drm);
437 component_unbind_all(dev, drm);
438 drm_dev_unregister(drm);
439 malidp_de_planes_destroy(drm);
440 drm_mode_config_cleanup(drm);
441 drm->dev_private = NULL;
442 dev_set_drvdata(dev, NULL);
443 clk_disable_unprepare(hwdev->mclk);
444 clk_disable_unprepare(hwdev->aclk);
445 clk_disable_unprepare(hwdev->pclk);
446 drm_dev_unref(drm);
447 of_reserved_mem_device_release(dev);
448}
449
450static const struct component_master_ops malidp_master_ops = {
451 .bind = malidp_bind,
452 .unbind = malidp_unbind,
453};
454
455static int malidp_compare_dev(struct device *dev, void *data)
456{
457 struct device_node *np = data;
458
459 return dev->of_node == np;
460}
461
462static int malidp_platform_probe(struct platform_device *pdev)
463{
464 struct device_node *port, *ep;
465 struct component_match *match = NULL;
466
467 if (!pdev->dev.of_node)
468 return -ENODEV;
469
470 /* there is only one output port inside each device, find it */
471 ep = of_graph_get_next_endpoint(pdev->dev.of_node, NULL);
472 if (!ep)
473 return -ENODEV;
474
475 if (!of_device_is_available(ep)) {
476 of_node_put(ep);
477 return -ENODEV;
478 }
479
480 /* add the remote encoder port as component */
481 port = of_graph_get_remote_port_parent(ep);
482 of_node_put(ep);
483 if (!port || !of_device_is_available(port)) {
484 of_node_put(port);
485 return -EAGAIN;
486 }
487
488 component_match_add(&pdev->dev, &match, malidp_compare_dev, port);
489 return component_master_add_with_match(&pdev->dev, &malidp_master_ops,
490 match);
491}
492
493static int malidp_platform_remove(struct platform_device *pdev)
494{
495 component_master_del(&pdev->dev, &malidp_master_ops);
496 return 0;
497}
498
499static struct platform_driver malidp_platform_driver = {
500 .probe = malidp_platform_probe,
501 .remove = malidp_platform_remove,
502 .driver = {
503 .name = "mali-dp",
504 .of_match_table = malidp_drm_of_match,
505 },
506};
507
508module_platform_driver(malidp_platform_driver);
509
510MODULE_AUTHOR("Liviu Dudau <Liviu.Dudau@arm.com>");
511MODULE_DESCRIPTION("ARM Mali DP DRM driver");
512MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/arm/malidp_drv.h b/drivers/gpu/drm/arm/malidp_drv.h
new file mode 100644
index 000000000000..95558fde214b
--- /dev/null
+++ b/drivers/gpu/drm/arm/malidp_drv.h
@@ -0,0 +1,54 @@
1/*
2 * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
3 * Author: Liviu Dudau <Liviu.Dudau@arm.com>
4 *
5 * This program is free software and is provided to you under the terms of the
6 * GNU General Public License version 2 as published by the Free Software
7 * Foundation, and any use by you of this program is subject to the terms
8 * of such GNU licence.
9 *
10 * ARM Mali DP500/DP550/DP650 KMS/DRM driver structures
11 */
12
13#ifndef __MALIDP_DRV_H__
14#define __MALIDP_DRV_H__
15
16#include <linux/mutex.h>
17#include <linux/wait.h>
18#include "malidp_hw.h"
19
20struct malidp_drm {
21 struct malidp_hw_device *dev;
22 struct drm_fbdev_cma *fbdev;
23 struct list_head event_list;
24 struct drm_crtc crtc;
25 wait_queue_head_t wq;
26 atomic_t config_valid;
27};
28
29#define crtc_to_malidp_device(x) container_of(x, struct malidp_drm, crtc)
30
31struct malidp_plane {
32 struct drm_plane base;
33 struct malidp_hw_device *hwdev;
34 const struct malidp_layer *layer;
35};
36
37struct malidp_plane_state {
38 struct drm_plane_state base;
39
40 /* size of the required rotation memory if plane is rotated */
41 u32 rotmem_size;
42};
43
44#define to_malidp_plane(x) container_of(x, struct malidp_plane, base)
45#define to_malidp_plane_state(x) container_of(x, struct malidp_plane_state, base)
46
47int malidp_de_planes_init(struct drm_device *drm);
48void malidp_de_planes_destroy(struct drm_device *drm);
49int malidp_crtc_init(struct drm_device *drm);
50
51/* often-used combination of rotation bits */
52#define MALIDP_ROTATED_MASK (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))
53
54#endif /* __MALIDP_DRV_H__ */
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
new file mode 100644
index 000000000000..a6132f1d58c1
--- /dev/null
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -0,0 +1,691 @@
1/*
2 * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
3 * Author: Liviu Dudau <Liviu.Dudau@arm.com>
4 *
5 * This program is free software and is provided to you under the terms of the
6 * GNU General Public License version 2 as published by the Free Software
7 * Foundation, and any use by you of this program is subject to the terms
8 * of such GNU licence.
9 *
10 * ARM Mali DP500/DP550/DP650 hardware manipulation routines. This is where
11 * the differences between the various versions of the hardware are dealt with,
12 * in an attempt to provide the rest of the driver code with a unified view.
13 */
14
15#include <linux/types.h>
16#include <linux/io.h>
17#include <drm/drmP.h>
18#include <video/videomode.h>
19#include <video/display_timing.h>
20
21#include "malidp_drv.h"
22#include "malidp_hw.h"
23
24static const struct malidp_input_format malidp500_de_formats[] = {
25 /* fourcc, layers supporting the format, internal id */
26 { DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 0 },
27 { DRM_FORMAT_ABGR2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 1 },
28 { DRM_FORMAT_ARGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 2 },
29 { DRM_FORMAT_ABGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 3 },
30 { DRM_FORMAT_XRGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 4 },
31 { DRM_FORMAT_XBGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 5 },
32 { DRM_FORMAT_RGB888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 6 },
33 { DRM_FORMAT_BGR888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 7 },
34 { DRM_FORMAT_RGBA5551, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 8 },
35 { DRM_FORMAT_ABGR1555, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 9 },
36 { DRM_FORMAT_RGB565, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 10 },
37 { DRM_FORMAT_BGR565, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 11 },
38 { DRM_FORMAT_UYVY, DE_VIDEO1, 12 },
39 { DRM_FORMAT_YUYV, DE_VIDEO1, 13 },
40 { DRM_FORMAT_NV12, DE_VIDEO1, 14 },
41 { DRM_FORMAT_YUV420, DE_VIDEO1, 15 },
42};
43
44#define MALIDP_ID(__group, __format) \
45 ((((__group) & 0x7) << 3) | ((__format) & 0x7))
46
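
MALIDP_ID() packs a 3-bit format group and a 3-bit index into the 6-bit hardware format id used in the table that follows. A quick standalone check of the encoding:

#include <stdio.h>

#define MALIDP_ID(__group, __format) \
	((((__group) & 0x7) << 3) | ((__format) & 0x7))

int main(void)
{
	/* DRM_FORMAT_RGB565 is group 4, format 2 in the table below. */
	unsigned int id = MALIDP_ID(4, 2);

	printf("id = %u (group %u, format %u)\n", id, id >> 3, id & 0x7);
	return 0;   /* prints: id = 34 (group 4, format 2) */
}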
47#define MALIDP_COMMON_FORMATS \
48 /* fourcc, layers supporting the format, internal id */ \
49 { DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 0) }, \
50 { DRM_FORMAT_ABGR2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 1) }, \
51 { DRM_FORMAT_RGBA1010102, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 2) }, \
52 { DRM_FORMAT_BGRA1010102, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 3) }, \
53 { DRM_FORMAT_ARGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 0) }, \
54 { DRM_FORMAT_ABGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 1) }, \
55 { DRM_FORMAT_RGBA8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 2) }, \
56 { DRM_FORMAT_BGRA8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 3) }, \
57 { DRM_FORMAT_XRGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 0) }, \
58 { DRM_FORMAT_XBGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 1) }, \
59 { DRM_FORMAT_RGBX8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 2) }, \
60 { DRM_FORMAT_BGRX8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 3) }, \
61 { DRM_FORMAT_RGB888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(3, 0) }, \
62 { DRM_FORMAT_BGR888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(3, 1) }, \
63 { DRM_FORMAT_RGBA5551, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 0) }, \
64 { DRM_FORMAT_ABGR1555, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 1) }, \
65 { DRM_FORMAT_RGB565, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 2) }, \
66 { DRM_FORMAT_BGR565, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 3) }, \
67 { DRM_FORMAT_YUYV, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 2) }, \
68 { DRM_FORMAT_UYVY, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 3) }, \
69 { DRM_FORMAT_NV12, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 6) }, \
70 { DRM_FORMAT_YUV420, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 7) }
71
72static const struct malidp_input_format malidp550_de_formats[] = {
73 MALIDP_COMMON_FORMATS,
74};
75
76static const struct malidp_layer malidp500_layers[] = {
77 { DE_VIDEO1, MALIDP500_DE_LV_BASE, MALIDP500_DE_LV_PTR_BASE },
78 { DE_GRAPHICS1, MALIDP500_DE_LG1_BASE, MALIDP500_DE_LG1_PTR_BASE },
79 { DE_GRAPHICS2, MALIDP500_DE_LG2_BASE, MALIDP500_DE_LG2_PTR_BASE },
80};
81
82static const struct malidp_layer malidp550_layers[] = {
83 { DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE },
84 { DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE },
85 { DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE },
86 { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE },
87};
88
89#define MALIDP_DE_DEFAULT_PREFETCH_START 5
90
91static int malidp500_query_hw(struct malidp_hw_device *hwdev)
92{
93 u32 conf = malidp_hw_read(hwdev, MALIDP500_CONFIG_ID);
94 /* bit 4 of the CONFIG_ID register holds the line size multiplier */
95 u8 ln_size_mult = conf & 0x10 ? 2 : 1;
96
97 hwdev->min_line_size = 2;
98 hwdev->max_line_size = SZ_2K * ln_size_mult;
99 hwdev->rotation_memory[0] = SZ_1K * 64 * ln_size_mult;
100 hwdev->rotation_memory[1] = 0; /* no second rotation memory bank */
101
102 return 0;
103}
104
105static void malidp500_enter_config_mode(struct malidp_hw_device *hwdev)
106{
107 u32 status, count = 100;
108
109 malidp_hw_setbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL);
110 while (count) {
111 status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
112 if ((status & MALIDP500_DC_CONFIG_REQ) == MALIDP500_DC_CONFIG_REQ)
113 break;
114 /*
115 * entering config mode can take as long as the rendering
116 * of a full frame, hence the long sleep here
117 */
118 usleep_range(1000, 10000);
119 count--;
120 }
121 WARN(count == 0, "timeout while entering config mode");
122}
123
124static void malidp500_leave_config_mode(struct malidp_hw_device *hwdev)
125{
126 u32 status, count = 100;
127
128 malidp_hw_clearbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL);
129 while (count) {
130 status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
131 if ((status & MALIDP500_DC_CONFIG_REQ) == 0)
132 break;
133 usleep_range(100, 1000);
134 count--;
135 }
136 WARN(count == 0, "timeout while leaving config mode");
137}
138
139static bool malidp500_in_config_mode(struct malidp_hw_device *hwdev)
140{
141 u32 status;
142
143 status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
144 if ((status & MALIDP500_DC_CONFIG_REQ) == MALIDP500_DC_CONFIG_REQ)
145 return true;
146
147 return false;
148}
149
150static void malidp500_set_config_valid(struct malidp_hw_device *hwdev)
151{
152 malidp_hw_setbits(hwdev, MALIDP_CFG_VALID, MALIDP500_CONFIG_VALID);
153}
154
155static void malidp500_modeset(struct malidp_hw_device *hwdev, struct videomode *mode)
156{
157 u32 val = 0;
158
159 malidp_hw_clearbits(hwdev, MALIDP500_DC_CLEAR_MASK, MALIDP500_DC_CONTROL);
160 if (mode->flags & DISPLAY_FLAGS_HSYNC_HIGH)
161 val |= MALIDP500_HSYNCPOL;
162 if (mode->flags & DISPLAY_FLAGS_VSYNC_HIGH)
163 val |= MALIDP500_VSYNCPOL;
164 val |= MALIDP_DE_DEFAULT_PREFETCH_START;
165 malidp_hw_setbits(hwdev, val, MALIDP500_DC_CONTROL);
166
167 /*
168 * Mali-DP500 encodes the background color like this:
169 * - red @ MALIDP500_BGND_COLOR[12:0]
170 * - green @ MALIDP500_BGND_COLOR[27:16]
171 * - blue @ (MALIDP500_BGND_COLOR + 4)[12:0]
172 */
173 val = ((MALIDP_BGND_COLOR_G & 0xfff) << 16) |
174 (MALIDP_BGND_COLOR_R & 0xfff);
175 malidp_hw_write(hwdev, val, MALIDP500_BGND_COLOR);
176 malidp_hw_write(hwdev, MALIDP_BGND_COLOR_B, MALIDP500_BGND_COLOR + 4);
177
178 val = MALIDP_DE_H_FRONTPORCH(mode->hfront_porch) |
179 MALIDP_DE_H_BACKPORCH(mode->hback_porch);
180 malidp_hw_write(hwdev, val, MALIDP500_TIMINGS_BASE + MALIDP_DE_H_TIMINGS);
181
182 val = MALIDP500_DE_V_FRONTPORCH(mode->vfront_porch) |
183 MALIDP_DE_V_BACKPORCH(mode->vback_porch);
184 malidp_hw_write(hwdev, val, MALIDP500_TIMINGS_BASE + MALIDP_DE_V_TIMINGS);
185
186 val = MALIDP_DE_H_SYNCWIDTH(mode->hsync_len) |
187 MALIDP_DE_V_SYNCWIDTH(mode->vsync_len);
188 malidp_hw_write(hwdev, val, MALIDP500_TIMINGS_BASE + MALIDP_DE_SYNC_WIDTH);
189
190 val = MALIDP_DE_H_ACTIVE(mode->hactive) | MALIDP_DE_V_ACTIVE(mode->vactive);
191 malidp_hw_write(hwdev, val, MALIDP500_TIMINGS_BASE + MALIDP_DE_HV_ACTIVE);
192
193 if (mode->flags & DISPLAY_FLAGS_INTERLACED)
194 malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
195 else
196 malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
197}
198
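
A standalone example of the split-register background colour packing that malidp500_modeset() documents above (the channel values are arbitrary 12-bit samples):

#include <stdio.h>

int main(void)
{
	/* Hypothetical 12-bit-per-channel background colour. */
	unsigned int r = 0x123, g = 0x456, b = 0x789;

	/* First register: green in [27:16], red in [12:0], as documented above. */
	unsigned int reg0 = ((g & 0xfff) << 16) | (r & 0xfff);
	/* Second register (BGND_COLOR + 4): blue in [12:0]. */
	unsigned int reg1 = b & 0xfff;

	printf("reg0 = 0x%08x, reg1 = 0x%08x\n", reg0, reg1);
	return 0;   /* reg0 = 0x04560123, reg1 = 0x00000789 */
}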
199static int malidp500_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt)
200{
201 unsigned int depth;
202 int bpp;
203
204 /* RGB888 or BGR888 can't be rotated */
205 if ((fmt == DRM_FORMAT_RGB888) || (fmt == DRM_FORMAT_BGR888))
206 return -EINVAL;
207
208 /*
209 * Each layer needs enough rotation memory to fit 8 lines
210 * worth of pixel data. Required size is then:
211 * size = rotated_width * (bpp / 8) * 8;
212 */
213 drm_fb_get_bpp_depth(fmt, &depth, &bpp);
214
215 return w * bpp;
216}
217
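
The sizing comment in malidp500_rotmem_required() above works out to eight lines of rotated pixels at bpp/8 bytes each, i.e. w * bpp bytes per layer. A worked check of that arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int w = 1920, bpp = 32;   /* e.g. XRGB8888 */

	/* 8 lines x (bpp / 8) bytes per pixel x w pixels per line */
	unsigned int size = 8 * (bpp / 8) * w;

	printf("%u bytes (same as w * bpp = %u)\n", size, w * bpp);
	return 0;   /* both values print as 61440 */
}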
218static int malidp550_query_hw(struct malidp_hw_device *hwdev)
219{
220 u32 conf = malidp_hw_read(hwdev, MALIDP550_CONFIG_ID);
221 u8 ln_size = (conf >> 4) & 0x3, rsize;
222
223 hwdev->min_line_size = 2;
224
225 switch (ln_size) {
226 case 0:
227 hwdev->max_line_size = SZ_2K;
228 /* two banks of 64KB for rotation memory */
229 rsize = 64;
230 break;
231 case 1:
232 hwdev->max_line_size = SZ_4K;
233 /* two banks of 128KB for rotation memory */
234 rsize = 128;
235 break;
236 case 2:
237 hwdev->max_line_size = 1280;
238 /* two banks of 40KB for rotation memory */
239 rsize = 40;
240 break;
241 case 3:
242 /* reserved value */
243 hwdev->max_line_size = 0;
244 return -EINVAL;
245 }
246
247 hwdev->rotation_memory[0] = hwdev->rotation_memory[1] = rsize * SZ_1K;
248 return 0;
249}
250
251static void malidp550_enter_config_mode(struct malidp_hw_device *hwdev)
252{
253 u32 status, count = 100;
254
255 malidp_hw_setbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL);
256 while (count) {
257 status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
258 if ((status & MALIDP550_DC_CONFIG_REQ) == MALIDP550_DC_CONFIG_REQ)
259 break;
260 /*
261 * entering config mode can take as long as the rendering
262 * of a full frame, hence the long sleep here
263 */
264 usleep_range(1000, 10000);
265 count--;
266 }
267 WARN(count == 0, "timeout while entering config mode");
268}
269
270static void malidp550_leave_config_mode(struct malidp_hw_device *hwdev)
271{
272 u32 status, count = 100;
273
274 malidp_hw_clearbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL);
275 while (count) {
276 status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
277 if ((status & MALIDP550_DC_CONFIG_REQ) == 0)
278 break;
279 usleep_range(100, 1000);
280 count--;
281 }
282 WARN(count == 0, "timeout while leaving config mode");
283}
284
285static bool malidp550_in_config_mode(struct malidp_hw_device *hwdev)
286{
287 u32 status;
288
289 status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
290 if ((status & MALIDP550_DC_CONFIG_REQ) == MALIDP550_DC_CONFIG_REQ)
291 return true;
292
293 return false;
294}
295
296static void malidp550_set_config_valid(struct malidp_hw_device *hwdev)
297{
298 malidp_hw_setbits(hwdev, MALIDP_CFG_VALID, MALIDP550_CONFIG_VALID);
299}
300
301static void malidp550_modeset(struct malidp_hw_device *hwdev, struct videomode *mode)
302{
303 u32 val = MALIDP_DE_DEFAULT_PREFETCH_START;
304
305 malidp_hw_write(hwdev, val, MALIDP550_DE_CONTROL);
306 /*
307 * Mali-DP550 and Mali-DP650 encode the background color like this:
308 * - red @ MALIDP550_DE_BGND_COLOR[23:16]
309 * - green @ MALIDP550_DE_BGND_COLOR[15:8]
310 * - blue @ MALIDP550_DE_BGND_COLOR[7:0]
311 *
312 * We need to truncate the least significant 4 bits from the default
313 * MALIDP_BGND_COLOR_x values
314 */
315 val = (((MALIDP_BGND_COLOR_R >> 4) & 0xff) << 16) |
316 (((MALIDP_BGND_COLOR_G >> 4) & 0xff) << 8) |
317 ((MALIDP_BGND_COLOR_B >> 4) & 0xff);
318 malidp_hw_write(hwdev, val, MALIDP550_DE_BGND_COLOR);
319
320 val = MALIDP_DE_H_FRONTPORCH(mode->hfront_porch) |
321 MALIDP_DE_H_BACKPORCH(mode->hback_porch);
322 malidp_hw_write(hwdev, val, MALIDP550_TIMINGS_BASE + MALIDP_DE_H_TIMINGS);
323
324 val = MALIDP550_DE_V_FRONTPORCH(mode->vfront_porch) |
325 MALIDP_DE_V_BACKPORCH(mode->vback_porch);
326 malidp_hw_write(hwdev, val, MALIDP550_TIMINGS_BASE + MALIDP_DE_V_TIMINGS);
327
328 val = MALIDP_DE_H_SYNCWIDTH(mode->hsync_len) |
329 MALIDP_DE_V_SYNCWIDTH(mode->vsync_len);
330 if (mode->flags & DISPLAY_FLAGS_HSYNC_HIGH)
331 val |= MALIDP550_HSYNCPOL;
332 if (mode->flags & DISPLAY_FLAGS_VSYNC_HIGH)
333 val |= MALIDP550_VSYNCPOL;
334 malidp_hw_write(hwdev, val, MALIDP550_TIMINGS_BASE + MALIDP_DE_SYNC_WIDTH);
335
336 val = MALIDP_DE_H_ACTIVE(mode->hactive) | MALIDP_DE_V_ACTIVE(mode->vactive);
337 malidp_hw_write(hwdev, val, MALIDP550_TIMINGS_BASE + MALIDP_DE_HV_ACTIVE);
338
339 if (mode->flags & DISPLAY_FLAGS_INTERLACED)
340 malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
341 else
342 malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
343}
344
345static int malidp550_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt)
346{
347 u32 bytes_per_col;
348
349	/* packed RGB888 or BGR888 can't be rotated */
350 if ((fmt == DRM_FORMAT_RGB888) || (fmt == DRM_FORMAT_BGR888))
351 return -EINVAL;
352
353 switch (fmt) {
354 /* 8 lines at 4 bytes per pixel */
355 case DRM_FORMAT_ARGB2101010:
356 case DRM_FORMAT_ABGR2101010:
357 case DRM_FORMAT_RGBA1010102:
358 case DRM_FORMAT_BGRA1010102:
359 case DRM_FORMAT_ARGB8888:
360 case DRM_FORMAT_ABGR8888:
361 case DRM_FORMAT_RGBA8888:
362 case DRM_FORMAT_BGRA8888:
363 case DRM_FORMAT_XRGB8888:
364 case DRM_FORMAT_XBGR8888:
365 case DRM_FORMAT_RGBX8888:
366 case DRM_FORMAT_BGRX8888:
367 case DRM_FORMAT_RGB888:
368 case DRM_FORMAT_BGR888:
369 /* 16 lines at 2 bytes per pixel */
370 case DRM_FORMAT_RGBA5551:
371 case DRM_FORMAT_ABGR1555:
372 case DRM_FORMAT_RGB565:
373 case DRM_FORMAT_BGR565:
374 case DRM_FORMAT_UYVY:
375 case DRM_FORMAT_YUYV:
376 bytes_per_col = 32;
377 break;
378 /* 16 lines at 1.5 bytes per pixel */
379 case DRM_FORMAT_NV12:
380 case DRM_FORMAT_YUV420:
381 bytes_per_col = 24;
382 break;
383 default:
384 return -EINVAL;
385 }
386
387 return w * bytes_per_col;
388}
389
390static int malidp650_query_hw(struct malidp_hw_device *hwdev)
391{
392 u32 conf = malidp_hw_read(hwdev, MALIDP550_CONFIG_ID);
393 u8 ln_size = (conf >> 4) & 0x3, rsize;
394
395 hwdev->min_line_size = 4;
396
397 switch (ln_size) {
398 case 0:
399 case 2:
400 /* reserved values */
401 hwdev->max_line_size = 0;
402 return -EINVAL;
403 case 1:
404 hwdev->max_line_size = SZ_4K;
405 /* two banks of 128KB for rotation memory */
406 rsize = 128;
407 break;
408 case 3:
409 hwdev->max_line_size = 2560;
410 /* two banks of 80KB for rotation memory */
411 rsize = 80;
412 }
413
414 hwdev->rotation_memory[0] = hwdev->rotation_memory[1] = rsize * SZ_1K;
415 return 0;
416}
417
418const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
419 [MALIDP_500] = {
420 .map = {
421 .se_base = MALIDP500_SE_BASE,
422 .dc_base = MALIDP500_DC_BASE,
423 .out_depth_base = MALIDP500_OUTPUT_DEPTH,
424 .features = 0, /* no CLEARIRQ register */
425 .n_layers = ARRAY_SIZE(malidp500_layers),
426 .layers = malidp500_layers,
427 .de_irq_map = {
428 .irq_mask = MALIDP_DE_IRQ_UNDERRUN |
429 MALIDP500_DE_IRQ_AXI_ERR |
430 MALIDP500_DE_IRQ_VSYNC |
431 MALIDP500_DE_IRQ_GLOBAL,
432 .vsync_irq = MALIDP500_DE_IRQ_VSYNC,
433 },
434 .se_irq_map = {
435 .irq_mask = MALIDP500_SE_IRQ_CONF_MODE,
436 .vsync_irq = 0,
437 },
438 .dc_irq_map = {
439 .irq_mask = MALIDP500_DE_IRQ_CONF_VALID,
440 .vsync_irq = MALIDP500_DE_IRQ_CONF_VALID,
441 },
442 .input_formats = malidp500_de_formats,
443 .n_input_formats = ARRAY_SIZE(malidp500_de_formats),
444 },
445 .query_hw = malidp500_query_hw,
446 .enter_config_mode = malidp500_enter_config_mode,
447 .leave_config_mode = malidp500_leave_config_mode,
448 .in_config_mode = malidp500_in_config_mode,
449 .set_config_valid = malidp500_set_config_valid,
450 .modeset = malidp500_modeset,
451 .rotmem_required = malidp500_rotmem_required,
452 },
453 [MALIDP_550] = {
454 .map = {
455 .se_base = MALIDP550_SE_BASE,
456 .dc_base = MALIDP550_DC_BASE,
457 .out_depth_base = MALIDP550_DE_OUTPUT_DEPTH,
458 .features = MALIDP_REGMAP_HAS_CLEARIRQ,
459 .n_layers = ARRAY_SIZE(malidp550_layers),
460 .layers = malidp550_layers,
461 .de_irq_map = {
462 .irq_mask = MALIDP_DE_IRQ_UNDERRUN |
463 MALIDP550_DE_IRQ_VSYNC,
464 .vsync_irq = MALIDP550_DE_IRQ_VSYNC,
465 },
466 .se_irq_map = {
467 .irq_mask = MALIDP550_SE_IRQ_EOW |
468 MALIDP550_SE_IRQ_AXI_ERR,
469 },
470 .dc_irq_map = {
471 .irq_mask = MALIDP550_DC_IRQ_CONF_VALID,
472 .vsync_irq = MALIDP550_DC_IRQ_CONF_VALID,
473 },
474 .input_formats = malidp550_de_formats,
475 .n_input_formats = ARRAY_SIZE(malidp550_de_formats),
476 },
477 .query_hw = malidp550_query_hw,
478 .enter_config_mode = malidp550_enter_config_mode,
479 .leave_config_mode = malidp550_leave_config_mode,
480 .in_config_mode = malidp550_in_config_mode,
481 .set_config_valid = malidp550_set_config_valid,
482 .modeset = malidp550_modeset,
483 .rotmem_required = malidp550_rotmem_required,
484 },
485 [MALIDP_650] = {
486 .map = {
487 .se_base = MALIDP550_SE_BASE,
488 .dc_base = MALIDP550_DC_BASE,
489 .out_depth_base = MALIDP550_DE_OUTPUT_DEPTH,
490 .features = MALIDP_REGMAP_HAS_CLEARIRQ,
491 .n_layers = ARRAY_SIZE(malidp550_layers),
492 .layers = malidp550_layers,
493 .de_irq_map = {
494 .irq_mask = MALIDP_DE_IRQ_UNDERRUN |
495 MALIDP650_DE_IRQ_DRIFT |
496 MALIDP550_DE_IRQ_VSYNC,
497 .vsync_irq = MALIDP550_DE_IRQ_VSYNC,
498 },
499 .se_irq_map = {
500 .irq_mask = MALIDP550_SE_IRQ_EOW |
501 MALIDP550_SE_IRQ_AXI_ERR,
502 },
503 .dc_irq_map = {
504 .irq_mask = MALIDP550_DC_IRQ_CONF_VALID,
505 .vsync_irq = MALIDP550_DC_IRQ_CONF_VALID,
506 },
507 .input_formats = malidp550_de_formats,
508 .n_input_formats = ARRAY_SIZE(malidp550_de_formats),
509 },
510 .query_hw = malidp650_query_hw,
511 .enter_config_mode = malidp550_enter_config_mode,
512 .leave_config_mode = malidp550_leave_config_mode,
513 .in_config_mode = malidp550_in_config_mode,
514 .set_config_valid = malidp550_set_config_valid,
515 .modeset = malidp550_modeset,
516 .rotmem_required = malidp550_rotmem_required,
517 },
518};
519
520u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map,
521 u8 layer_id, u32 format)
522{
523 unsigned int i;
524
525 for (i = 0; i < map->n_input_formats; i++) {
526 if (((map->input_formats[i].layer & layer_id) == layer_id) &&
527 (map->input_formats[i].format == format))
528 return map->input_formats[i].id;
529 }
530
531 return MALIDP_INVALID_FORMAT_ID;
532}
533
534static void malidp_hw_clear_irq(struct malidp_hw_device *hwdev, u8 block, u32 irq)
535{
536 u32 base = malidp_get_block_base(hwdev, block);
537
538 if (hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ)
539 malidp_hw_write(hwdev, irq, base + MALIDP_REG_CLEARIRQ);
540 else
541 malidp_hw_write(hwdev, irq, base + MALIDP_REG_STATUS);
542}
543
544static irqreturn_t malidp_de_irq(int irq, void *arg)
545{
546 struct drm_device *drm = arg;
547 struct malidp_drm *malidp = drm->dev_private;
548 struct malidp_hw_device *hwdev;
549 const struct malidp_irq_map *de;
550 u32 status, mask, dc_status;
551 irqreturn_t ret = IRQ_NONE;
552
553 if (!drm->dev_private)
554 return IRQ_HANDLED;
555
556 hwdev = malidp->dev;
557 de = &hwdev->map.de_irq_map;
558
559 /* first handle the config valid IRQ */
560 dc_status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
561 if (dc_status & hwdev->map.dc_irq_map.vsync_irq) {
562 /* we have a page flip event */
563 atomic_set(&malidp->config_valid, 1);
564 malidp_hw_clear_irq(hwdev, MALIDP_DC_BLOCK, dc_status);
565 ret = IRQ_WAKE_THREAD;
566 }
567
568 status = malidp_hw_read(hwdev, MALIDP_REG_STATUS);
569 if (!(status & de->irq_mask))
570 return ret;
571
572 mask = malidp_hw_read(hwdev, MALIDP_REG_MASKIRQ);
573 status &= mask;
574 if (status & de->vsync_irq)
575 drm_crtc_handle_vblank(&malidp->crtc);
576
577 malidp_hw_clear_irq(hwdev, MALIDP_DE_BLOCK, status);
578
579 return (ret == IRQ_NONE) ? IRQ_HANDLED : ret;
580}
581
582static irqreturn_t malidp_de_irq_thread_handler(int irq, void *arg)
583{
584 struct drm_device *drm = arg;
585 struct malidp_drm *malidp = drm->dev_private;
586
587 wake_up(&malidp->wq);
588
589 return IRQ_HANDLED;
590}
591
592int malidp_de_irq_init(struct drm_device *drm, int irq)
593{
594 struct malidp_drm *malidp = drm->dev_private;
595 struct malidp_hw_device *hwdev = malidp->dev;
596 int ret;
597
598 /* ensure interrupts are disabled */
599 malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK, 0xffffffff);
600 malidp_hw_clear_irq(hwdev, MALIDP_DE_BLOCK, 0xffffffff);
601 malidp_hw_disable_irq(hwdev, MALIDP_DC_BLOCK, 0xffffffff);
602 malidp_hw_clear_irq(hwdev, MALIDP_DC_BLOCK, 0xffffffff);
603
604 ret = devm_request_threaded_irq(drm->dev, irq, malidp_de_irq,
605 malidp_de_irq_thread_handler,
606 IRQF_SHARED, "malidp-de", drm);
607 if (ret < 0) {
608 DRM_ERROR("failed to install DE IRQ handler\n");
609 return ret;
610 }
611
612 /* first enable the DC block IRQs */
613 malidp_hw_enable_irq(hwdev, MALIDP_DC_BLOCK,
614 hwdev->map.dc_irq_map.irq_mask);
615
616 /* now enable the DE block IRQs */
617 malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK,
618 hwdev->map.de_irq_map.irq_mask);
619
620 return 0;
621}
622
623void malidp_de_irq_fini(struct drm_device *drm)
624{
625 struct malidp_drm *malidp = drm->dev_private;
626 struct malidp_hw_device *hwdev = malidp->dev;
627
628 malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK,
629 hwdev->map.de_irq_map.irq_mask);
630 malidp_hw_disable_irq(hwdev, MALIDP_DC_BLOCK,
631 hwdev->map.dc_irq_map.irq_mask);
632}
633
634static irqreturn_t malidp_se_irq(int irq, void *arg)
635{
636 struct drm_device *drm = arg;
637 struct malidp_drm *malidp = drm->dev_private;
638 struct malidp_hw_device *hwdev = malidp->dev;
639 u32 status, mask;
640
641 status = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_STATUS);
642 if (!(status & hwdev->map.se_irq_map.irq_mask))
643 return IRQ_NONE;
644
645 mask = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_MASKIRQ);
646 status = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_STATUS);
647 status &= mask;
648	/* TODO: decode the status bits and fire the VSYNC and page flip events */
649
650 malidp_hw_clear_irq(hwdev, MALIDP_SE_BLOCK, status);
651
652 return IRQ_HANDLED;
653}
654
655static irqreturn_t malidp_se_irq_thread_handler(int irq, void *arg)
656{
657 return IRQ_HANDLED;
658}
659
660int malidp_se_irq_init(struct drm_device *drm, int irq)
661{
662 struct malidp_drm *malidp = drm->dev_private;
663 struct malidp_hw_device *hwdev = malidp->dev;
664 int ret;
665
666 /* ensure interrupts are disabled */
667 malidp_hw_disable_irq(hwdev, MALIDP_SE_BLOCK, 0xffffffff);
668 malidp_hw_clear_irq(hwdev, MALIDP_SE_BLOCK, 0xffffffff);
669
670 ret = devm_request_threaded_irq(drm->dev, irq, malidp_se_irq,
671 malidp_se_irq_thread_handler,
672 IRQF_SHARED, "malidp-se", drm);
673 if (ret < 0) {
674 DRM_ERROR("failed to install SE IRQ handler\n");
675 return ret;
676 }
677
678 malidp_hw_enable_irq(hwdev, MALIDP_SE_BLOCK,
679 hwdev->map.se_irq_map.irq_mask);
680
681 return 0;
682}
683
684void malidp_se_irq_fini(struct drm_device *drm)
685{
686 struct malidp_drm *malidp = drm->dev_private;
687 struct malidp_hw_device *hwdev = malidp->dev;
688
689 malidp_hw_disable_irq(hwdev, MALIDP_SE_BLOCK,
690 hwdev->map.se_irq_map.irq_mask);
691}
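
A note on the config-mode helpers above: both enter and leave poll a status
register with a bounded retry count instead of blocking indefinitely. Below is
a minimal, self-contained sketch of that idiom; poll_reg() and REQ_BIT are
made-up stand-ins for malidp_hw_read() and MALIDP550_DC_CONFIG_REQ, so this is
an illustration of the pattern, not driver code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define REQ_BIT (1u << 16)	/* stand-in for MALIDP550_DC_CONFIG_REQ */

/* stand-in for malidp_hw_read(): pretend the hardware acks on the 4th read */
static uint32_t poll_reg(void)
{
	static int calls;
	return (++calls >= 4) ? REQ_BIT : 0;
}

/* same shape as malidp550_enter_config_mode(): bounded poll, then report */
static bool wait_for_bit_set(unsigned int retries)
{
	while (retries--) {
		if (poll_reg() & REQ_BIT)
			return true;
		usleep(1000);	/* the driver uses usleep_range(1000, 10000) */
	}
	return false;
}

int main(void)
{
	printf("config mode %s\n",
	       wait_for_bit_set(100) ? "entered" : "timed out");
	return 0;
}
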
diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h
new file mode 100644
index 000000000000..141743e9f3a6
--- /dev/null
+++ b/drivers/gpu/drm/arm/malidp_hw.h
@@ -0,0 +1,241 @@
1/*
2 *
3 * (C) COPYRIGHT 2013-2016 ARM Limited. All rights reserved.
4 *
5 * This program is free software and is provided to you under the terms of the
6 * GNU General Public License version 2 as published by the Free Software
7 * Foundation, and any use by you of this program is subject to the terms
8 * of such GNU licence.
9 *
10 * ARM Mali DP hardware manipulation routines.
11 */
12
13#ifndef __MALIDP_HW_H__
14#define __MALIDP_HW_H__
15
16#include <linux/bitops.h>
17#include "malidp_regs.h"
18
19struct videomode;
20struct clk;
21
22/* Mali DP IP blocks */
23enum {
24 MALIDP_DE_BLOCK = 0,
25 MALIDP_SE_BLOCK,
26 MALIDP_DC_BLOCK
27};
28
29/* Mali DP layer IDs */
30enum {
31 DE_VIDEO1 = BIT(0),
32 DE_GRAPHICS1 = BIT(1),
33 DE_GRAPHICS2 = BIT(2), /* used only in DP500 */
34 DE_VIDEO2 = BIT(3),
35 DE_SMART = BIT(4),
36};
37
38struct malidp_input_format {
39 u32 format; /* DRM fourcc */
40 u8 layer; /* bitmask of layers supporting it */
41 u8 id; /* used internally */
42};
43
44#define MALIDP_INVALID_FORMAT_ID 0xff
45
46/*
47 * hide the differences between register maps
48 * by using a common structure to hold the
49 * base register offsets
50 */
51
52struct malidp_irq_map {
53 u32 irq_mask; /* mask of IRQs that can be enabled in the block */
54 u32 vsync_irq; /* IRQ bit used for signaling during VSYNC */
55};
56
57struct malidp_layer {
58 u16 id; /* layer ID */
59 u16 base; /* address offset for the register bank */
60 u16 ptr; /* address offset for the pointer register */
61};
62
63/* regmap features */
64#define MALIDP_REGMAP_HAS_CLEARIRQ (1 << 0)
65
66struct malidp_hw_regmap {
67	/* the DE register bank always sits at address offset 0x0000, */
68	/* so only the SE and DC banks need explicit offsets */
69	/* address offset of the SE register bank */
70 const u16 se_base;
71 /* address offset of the DC registers bank */
72 const u16 dc_base;
73
74 /* address offset for the output depth register */
75 const u16 out_depth_base;
76
77 /* bitmap with register map features */
78 const u8 features;
79
80 /* list of supported layers */
81 const u8 n_layers;
82 const struct malidp_layer *layers;
83
84 const struct malidp_irq_map de_irq_map;
85 const struct malidp_irq_map se_irq_map;
86 const struct malidp_irq_map dc_irq_map;
87
88 /* list of supported input formats for each layer */
89 const struct malidp_input_format *input_formats;
90 const u8 n_input_formats;
91};
92
93struct malidp_hw_device {
94 const struct malidp_hw_regmap map;
95 void __iomem *regs;
96
97 /* APB clock */
98 struct clk *pclk;
99 /* AXI clock */
100 struct clk *aclk;
101 /* main clock for display core */
102 struct clk *mclk;
103 /* pixel clock for display core */
104 struct clk *pxlclk;
105
106 /*
107 * Validate the driver instance against the hardware bits
108 */
109 int (*query_hw)(struct malidp_hw_device *hwdev);
110
111 /*
112 * Set the hardware into config mode, ready to accept mode changes
113 */
114 void (*enter_config_mode)(struct malidp_hw_device *hwdev);
115
116 /*
117 * Tell hardware to exit configuration mode
118 */
119 void (*leave_config_mode)(struct malidp_hw_device *hwdev);
120
121 /*
122 * Query if hardware is in configuration mode
123 */
124 bool (*in_config_mode)(struct malidp_hw_device *hwdev);
125
126 /*
127	 * Set the configuration valid flag for hardware parameters that can
128	 * be changed outside the configuration mode. Once config valid is
129	 * set, the hardware will latch the new settings at the end of the
130	 * current buffer scanout.
131 */
132 void (*set_config_valid)(struct malidp_hw_device *hwdev);
133
134 /*
135 * Set a new mode in hardware. Requires the hardware to be in
136 * configuration mode before this function is called.
137 */
138 void (*modeset)(struct malidp_hw_device *hwdev, struct videomode *m);
139
140 /*
141 * Calculate the required rotation memory given the active area
142 * and the buffer format.
143 */
144 int (*rotmem_required)(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt);
145
146 u8 features;
147
148 u8 min_line_size;
149 u16 max_line_size;
150
151 /* size of memory used for rotating layers, up to two banks available */
152 u32 rotation_memory[2];
153};
154
155/* Supported variants of the hardware */
156enum {
157 MALIDP_500 = 0,
158 MALIDP_550,
159 MALIDP_650,
160 /* keep the next entry last */
161 MALIDP_MAX_DEVICES
162};
163
164extern const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES];
165
166static inline u32 malidp_hw_read(struct malidp_hw_device *hwdev, u32 reg)
167{
168 return readl(hwdev->regs + reg);
169}
170
171static inline void malidp_hw_write(struct malidp_hw_device *hwdev,
172 u32 value, u32 reg)
173{
174 writel(value, hwdev->regs + reg);
175}
176
177static inline void malidp_hw_setbits(struct malidp_hw_device *hwdev,
178 u32 mask, u32 reg)
179{
180 u32 data = malidp_hw_read(hwdev, reg);
181
182 data |= mask;
183 malidp_hw_write(hwdev, data, reg);
184}
185
186static inline void malidp_hw_clearbits(struct malidp_hw_device *hwdev,
187 u32 mask, u32 reg)
188{
189 u32 data = malidp_hw_read(hwdev, reg);
190
191 data &= ~mask;
192 malidp_hw_write(hwdev, data, reg);
193}
194
195static inline u32 malidp_get_block_base(struct malidp_hw_device *hwdev,
196 u8 block)
197{
198 switch (block) {
199 case MALIDP_SE_BLOCK:
200 return hwdev->map.se_base;
201 case MALIDP_DC_BLOCK:
202 return hwdev->map.dc_base;
203 }
204
205 return 0;
206}
207
208static inline void malidp_hw_disable_irq(struct malidp_hw_device *hwdev,
209 u8 block, u32 irq)
210{
211 u32 base = malidp_get_block_base(hwdev, block);
212
213 malidp_hw_clearbits(hwdev, irq, base + MALIDP_REG_MASKIRQ);
214}
215
216static inline void malidp_hw_enable_irq(struct malidp_hw_device *hwdev,
217 u8 block, u32 irq)
218{
219 u32 base = malidp_get_block_base(hwdev, block);
220
221 malidp_hw_setbits(hwdev, irq, base + MALIDP_REG_MASKIRQ);
222}
223
224int malidp_de_irq_init(struct drm_device *drm, int irq);
225void malidp_de_irq_fini(struct drm_device *drm);
226int malidp_se_irq_init(struct drm_device *drm, int irq);
227void malidp_se_irq_fini(struct drm_device *drm);
228
229u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map,
230 u8 layer_id, u32 format);
231
232/*
233 * background color components are defined as 12-bit values;
234 * they will be shifted right when stored on hardware that
235 * supports only 8 bits per channel
236 */
237#define MALIDP_BGND_COLOR_R 0x000
238#define MALIDP_BGND_COLOR_G 0x000
239#define MALIDP_BGND_COLOR_B 0x000
240
241#endif /* __MALIDP_HW_H__ */
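
The 12-bit background color comment above maps to a simple shift-and-pack when
the register value is built; the standalone sketch below reproduces the
arithmetic that malidp550_modeset() performs. The sample component values are
invented for illustration only.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical 12-bit color components */
	uint32_t r12 = 0xabc, g12 = 0x123, b12 = 0xfff;

	/* drop the 4 LSBs, then pack as red[23:16] green[15:8] blue[7:0] */
	uint32_t reg = (((r12 >> 4) & 0xff) << 16) |
		       (((g12 >> 4) & 0xff) << 8) |
		       ((b12 >> 4) & 0xff);

	printf("MALIDP550_DE_BGND_COLOR <- 0x%06x\n", (unsigned int)reg);
	return 0;	/* prints 0xab12ff */
}
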
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
new file mode 100644
index 000000000000..725098d6179a
--- /dev/null
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -0,0 +1,298 @@
1/*
2 * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
3 * Author: Liviu Dudau <Liviu.Dudau@arm.com>
4 *
5 * This program is free software and is provided to you under the terms of the
6 * GNU General Public License version 2 as published by the Free Software
7 * Foundation, and any use by you of this program is subject to the terms
8 * of such GNU licence.
9 *
10 * ARM Mali DP plane manipulation routines.
11 */
12
13#include <drm/drmP.h>
14#include <drm/drm_atomic_helper.h>
15#include <drm/drm_fb_cma_helper.h>
16#include <drm/drm_gem_cma_helper.h>
17#include <drm/drm_plane_helper.h>
18
19#include "malidp_hw.h"
20#include "malidp_drv.h"
21
22/* Layer specific register offsets */
23#define MALIDP_LAYER_FORMAT 0x000
24#define MALIDP_LAYER_CONTROL 0x004
25#define LAYER_ENABLE (1 << 0)
26#define LAYER_ROT_OFFSET 8
27#define LAYER_H_FLIP (1 << 10)
28#define LAYER_V_FLIP (1 << 11)
29#define LAYER_ROT_MASK (0xf << 8)
30#define MALIDP_LAYER_SIZE 0x00c
31#define LAYER_H_VAL(x) (((x) & 0x1fff) << 0)
32#define LAYER_V_VAL(x) (((x) & 0x1fff) << 16)
33#define MALIDP_LAYER_COMP_SIZE 0x010
34#define MALIDP_LAYER_OFFSET 0x014
35#define MALIDP_LAYER_STRIDE 0x018
36
37static void malidp_de_plane_destroy(struct drm_plane *plane)
38{
39 struct malidp_plane *mp = to_malidp_plane(plane);
40
41 if (mp->base.fb)
42 drm_framebuffer_unreference(mp->base.fb);
43
44 drm_plane_helper_disable(plane);
45 drm_plane_cleanup(plane);
46 devm_kfree(plane->dev->dev, mp);
47}
48
49struct drm_plane_state *malidp_duplicate_plane_state(struct drm_plane *plane)
50{
51 struct malidp_plane_state *state, *m_state;
52
53 if (!plane->state)
54 return NULL;
55
56 state = kmalloc(sizeof(*state), GFP_KERNEL);
57 if (state) {
58 m_state = to_malidp_plane_state(plane->state);
59 __drm_atomic_helper_plane_duplicate_state(plane, &state->base);
60 state->rotmem_size = m_state->rotmem_size;
61 }
62
63	return state ? &state->base : NULL;
64}
65
66void malidp_destroy_plane_state(struct drm_plane *plane,
67 struct drm_plane_state *state)
68{
69 struct malidp_plane_state *m_state = to_malidp_plane_state(state);
70
71 __drm_atomic_helper_plane_destroy_state(state);
72 kfree(m_state);
73}
74
75static const struct drm_plane_funcs malidp_de_plane_funcs = {
76 .update_plane = drm_atomic_helper_update_plane,
77 .disable_plane = drm_atomic_helper_disable_plane,
78 .destroy = malidp_de_plane_destroy,
79 .reset = drm_atomic_helper_plane_reset,
80 .atomic_duplicate_state = malidp_duplicate_plane_state,
81 .atomic_destroy_state = malidp_destroy_plane_state,
82};
83
84static int malidp_de_plane_check(struct drm_plane *plane,
85 struct drm_plane_state *state)
86{
87 struct malidp_plane *mp = to_malidp_plane(plane);
88 struct malidp_plane_state *ms = to_malidp_plane_state(state);
89 u8 format_id;
90 u32 src_w, src_h;
91
92 if (!state->crtc || !state->fb)
93 return 0;
94
95 format_id = malidp_hw_get_format_id(&mp->hwdev->map, mp->layer->id,
96 state->fb->pixel_format);
97 if (format_id == MALIDP_INVALID_FORMAT_ID)
98 return -EINVAL;
99
100 src_w = state->src_w >> 16;
101 src_h = state->src_h >> 16;
102
103 if ((state->crtc_w > mp->hwdev->max_line_size) ||
104 (state->crtc_h > mp->hwdev->max_line_size) ||
105 (state->crtc_w < mp->hwdev->min_line_size) ||
106 (state->crtc_h < mp->hwdev->min_line_size) ||
107 (state->crtc_w != src_w) || (state->crtc_h != src_h))
108 return -EINVAL;
109
110 /* packed RGB888 / BGR888 can't be rotated or flipped */
111 if (state->rotation != BIT(DRM_ROTATE_0) &&
112 (state->fb->pixel_format == DRM_FORMAT_RGB888 ||
113 state->fb->pixel_format == DRM_FORMAT_BGR888))
114 return -EINVAL;
115
116 ms->rotmem_size = 0;
117 if (state->rotation & MALIDP_ROTATED_MASK) {
118 int val;
119
120 val = mp->hwdev->rotmem_required(mp->hwdev, state->crtc_h,
121 state->crtc_w,
122 state->fb->pixel_format);
123 if (val < 0)
124 return val;
125
126 ms->rotmem_size = val;
127 }
128
129 return 0;
130}
131
132static void malidp_de_plane_update(struct drm_plane *plane,
133 struct drm_plane_state *old_state)
134{
135 struct drm_gem_cma_object *obj;
136 struct malidp_plane *mp;
137 const struct malidp_hw_regmap *map;
138 u8 format_id;
139 u16 ptr;
140 u32 format, src_w, src_h, dest_w, dest_h, val = 0;
141 int num_planes, i;
142
143 mp = to_malidp_plane(plane);
144
145 map = &mp->hwdev->map;
146 format = plane->state->fb->pixel_format;
147 format_id = malidp_hw_get_format_id(map, mp->layer->id, format);
148 num_planes = drm_format_num_planes(format);
149
150 /* convert src values from Q16 fixed point to integer */
151 src_w = plane->state->src_w >> 16;
152 src_h = plane->state->src_h >> 16;
153 if (plane->state->rotation & MALIDP_ROTATED_MASK) {
154 dest_w = plane->state->crtc_h;
155 dest_h = plane->state->crtc_w;
156 } else {
157 dest_w = plane->state->crtc_w;
158 dest_h = plane->state->crtc_h;
159 }
160
161 malidp_hw_write(mp->hwdev, format_id, mp->layer->base);
162
163 for (i = 0; i < num_planes; i++) {
164 /* calculate the offset for the layer's plane registers */
165 ptr = mp->layer->ptr + (i << 4);
166
167 obj = drm_fb_cma_get_gem_obj(plane->state->fb, i);
168 malidp_hw_write(mp->hwdev, lower_32_bits(obj->paddr), ptr);
169 malidp_hw_write(mp->hwdev, upper_32_bits(obj->paddr), ptr + 4);
170 malidp_hw_write(mp->hwdev, plane->state->fb->pitches[i],
171 mp->layer->base + MALIDP_LAYER_STRIDE);
172 }
173
174 malidp_hw_write(mp->hwdev, LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h),
175 mp->layer->base + MALIDP_LAYER_SIZE);
176
177 malidp_hw_write(mp->hwdev, LAYER_H_VAL(dest_w) | LAYER_V_VAL(dest_h),
178 mp->layer->base + MALIDP_LAYER_COMP_SIZE);
179
180 malidp_hw_write(mp->hwdev, LAYER_H_VAL(plane->state->crtc_x) |
181 LAYER_V_VAL(plane->state->crtc_y),
182 mp->layer->base + MALIDP_LAYER_OFFSET);
183
184 /* first clear the rotation bits in the register */
185 malidp_hw_clearbits(mp->hwdev, LAYER_ROT_MASK,
186 mp->layer->base + MALIDP_LAYER_CONTROL);
187
188 /* setup the rotation and axis flip bits */
189 if (plane->state->rotation & DRM_ROTATE_MASK)
190 val = ilog2(plane->state->rotation & DRM_ROTATE_MASK) << LAYER_ROT_OFFSET;
191 if (plane->state->rotation & BIT(DRM_REFLECT_X))
192 val |= LAYER_V_FLIP;
193 if (plane->state->rotation & BIT(DRM_REFLECT_Y))
194 val |= LAYER_H_FLIP;
195
196 /* set the 'enable layer' bit */
197 val |= LAYER_ENABLE;
198
199 malidp_hw_setbits(mp->hwdev, val,
200 mp->layer->base + MALIDP_LAYER_CONTROL);
201}
202
203static void malidp_de_plane_disable(struct drm_plane *plane,
204 struct drm_plane_state *state)
205{
206 struct malidp_plane *mp = to_malidp_plane(plane);
207
208 malidp_hw_clearbits(mp->hwdev, LAYER_ENABLE,
209 mp->layer->base + MALIDP_LAYER_CONTROL);
210}
211
212static const struct drm_plane_helper_funcs malidp_de_plane_helper_funcs = {
213 .atomic_check = malidp_de_plane_check,
214 .atomic_update = malidp_de_plane_update,
215 .atomic_disable = malidp_de_plane_disable,
216};
217
218int malidp_de_planes_init(struct drm_device *drm)
219{
220 struct malidp_drm *malidp = drm->dev_private;
221 const struct malidp_hw_regmap *map = &malidp->dev->map;
222 struct malidp_plane *plane = NULL;
223 enum drm_plane_type plane_type;
224	unsigned long crtcs = (1 << drm->mode_config.num_crtc) - 1;
225 u32 *formats;
226 int ret, i, j, n;
227
228 formats = kcalloc(map->n_input_formats, sizeof(*formats), GFP_KERNEL);
229 if (!formats) {
230 ret = -ENOMEM;
231 goto cleanup;
232 }
233
234 for (i = 0; i < map->n_layers; i++) {
235 u8 id = map->layers[i].id;
236
237 plane = kzalloc(sizeof(*plane), GFP_KERNEL);
238 if (!plane) {
239 ret = -ENOMEM;
240 goto cleanup;
241 }
242
243 /* build the list of DRM supported formats based on the map */
244 for (n = 0, j = 0; j < map->n_input_formats; j++) {
245 if ((map->input_formats[j].layer & id) == id)
246 formats[n++] = map->input_formats[j].format;
247 }
248
249 plane_type = (i == 0) ? DRM_PLANE_TYPE_PRIMARY :
250 DRM_PLANE_TYPE_OVERLAY;
251 ret = drm_universal_plane_init(drm, &plane->base, crtcs,
252 &malidp_de_plane_funcs, formats,
253 n, plane_type, NULL);
254 if (ret < 0)
255 goto cleanup;
256
257 if (!drm->mode_config.rotation_property) {
258 unsigned long flags = BIT(DRM_ROTATE_0) |
259 BIT(DRM_ROTATE_90) |
260 BIT(DRM_ROTATE_180) |
261 BIT(DRM_ROTATE_270) |
262 BIT(DRM_REFLECT_X) |
263 BIT(DRM_REFLECT_Y);
264 drm->mode_config.rotation_property =
265 drm_mode_create_rotation_property(drm, flags);
266 }
267 /* SMART layer can't be rotated */
268 if (drm->mode_config.rotation_property && (id != DE_SMART))
269 drm_object_attach_property(&plane->base.base,
270 drm->mode_config.rotation_property,
271 BIT(DRM_ROTATE_0));
272
273 drm_plane_helper_add(&plane->base,
274 &malidp_de_plane_helper_funcs);
275 plane->hwdev = malidp->dev;
276 plane->layer = &map->layers[i];
277 }
278
279 kfree(formats);
280
281 return 0;
282
283cleanup:
284 malidp_de_planes_destroy(drm);
285 kfree(formats);
286
287 return ret;
288}
289
290void malidp_de_planes_destroy(struct drm_device *drm)
291{
292 struct drm_plane *p, *pt;
293
294 list_for_each_entry_safe(p, pt, &drm->mode_config.plane_list, head) {
295 drm_plane_cleanup(p);
296 kfree(p);
297 }
298}
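
One detail worth calling out in the plane code above: DRM passes the source
rectangle in Q16.16 fixed point, which is why malidp_de_plane_check() and
malidp_de_plane_update() shift src_w/src_h right by 16. A tiny standalone
sketch of that conversion, with invented values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t src_w = 1920u << 16;			/* 1920.0 in Q16.16 */
	uint32_t src_h = (1080u << 16) | 0x8000;	/* 1080.5 in Q16.16 */

	/* keep the integer part only, exactly as the driver does */
	printf("src: %ux%u\n",
	       (unsigned int)(src_w >> 16), (unsigned int)(src_h >> 16));
	return 0;	/* prints 1920x1080 */
}
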
diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h
new file mode 100644
index 000000000000..73fecb38f955
--- /dev/null
+++ b/drivers/gpu/drm/arm/malidp_regs.h
@@ -0,0 +1,172 @@
1/*
2 * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
3 * Author: Liviu Dudau <Liviu.Dudau@arm.com>
4 *
5 * This program is free software and is provided to you under the terms of the
6 * GNU General Public License version 2 as published by the Free Software
7 * Foundation, and any use by you of this program is subject to the terms
8 * of such GNU licence.
9 *
10 * ARM Mali DP500/DP550/DP650 registers definition.
11 */
12
13#ifndef __MALIDP_REGS_H__
14#define __MALIDP_REGS_H__
15
16/*
17 * abbreviations used:
18 * - DC - display core (general settings)
19 * - DE - display engine
20 * - SE - scaling engine
21 */
22
23/* interrupt bit masks */
24#define MALIDP_DE_IRQ_UNDERRUN (1 << 0)
25
26#define MALIDP500_DE_IRQ_AXI_ERR (1 << 4)
27#define MALIDP500_DE_IRQ_VSYNC (1 << 5)
28#define MALIDP500_DE_IRQ_PROG_LINE (1 << 6)
29#define MALIDP500_DE_IRQ_SATURATION (1 << 7)
30#define MALIDP500_DE_IRQ_CONF_VALID (1 << 8)
31#define MALIDP500_DE_IRQ_CONF_MODE (1 << 11)
32#define MALIDP500_DE_IRQ_CONF_ACTIVE (1 << 17)
33#define MALIDP500_DE_IRQ_PM_ACTIVE (1 << 18)
34#define MALIDP500_DE_IRQ_TESTMODE_ACTIVE (1 << 19)
35#define MALIDP500_DE_IRQ_FORCE_BLNK_ACTIVE (1 << 24)
36#define MALIDP500_DE_IRQ_AXI_BUSY (1 << 28)
37#define MALIDP500_DE_IRQ_GLOBAL (1 << 31)
38#define MALIDP500_SE_IRQ_CONF_MODE (1 << 0)
39#define MALIDP500_SE_IRQ_CONF_VALID (1 << 4)
40#define MALIDP500_SE_IRQ_INIT_BUSY (1 << 5)
41#define MALIDP500_SE_IRQ_AXI_ERROR (1 << 8)
42#define MALIDP500_SE_IRQ_OVERRUN (1 << 9)
43#define MALIDP500_SE_IRQ_PROG_LINE1 (1 << 12)
44#define MALIDP500_SE_IRQ_PROG_LINE2 (1 << 13)
45#define MALIDP500_SE_IRQ_CONF_ACTIVE (1 << 17)
46#define MALIDP500_SE_IRQ_PM_ACTIVE (1 << 18)
47#define MALIDP500_SE_IRQ_AXI_BUSY (1 << 28)
48#define MALIDP500_SE_IRQ_GLOBAL (1 << 31)
49
50#define MALIDP550_DE_IRQ_SATURATION (1 << 8)
51#define MALIDP550_DE_IRQ_VSYNC (1 << 12)
52#define MALIDP550_DE_IRQ_PROG_LINE (1 << 13)
53#define MALIDP550_DE_IRQ_AXI_ERR (1 << 16)
54#define MALIDP550_SE_IRQ_EOW (1 << 0)
55#define MALIDP550_SE_IRQ_AXI_ERR (1 << 16)
56#define MALIDP550_DC_IRQ_CONF_VALID (1 << 0)
57#define MALIDP550_DC_IRQ_CONF_MODE (1 << 4)
58#define MALIDP550_DC_IRQ_CONF_ACTIVE (1 << 16)
59#define MALIDP550_DC_IRQ_DE (1 << 20)
60#define MALIDP550_DC_IRQ_SE (1 << 24)
61
62#define MALIDP650_DE_IRQ_DRIFT (1 << 4)
63
64/* bit masks that are common between products */
65#define MALIDP_CFG_VALID (1 << 0)
66#define MALIDP_DISP_FUNC_ILACED (1 << 8)
67
68/* register offsets for IRQ management */
69#define MALIDP_REG_STATUS 0x00000
70#define MALIDP_REG_SETIRQ 0x00004
71#define MALIDP_REG_MASKIRQ 0x00008
72#define MALIDP_REG_CLEARIRQ 0x0000c
73
74/* register offsets */
75#define MALIDP_DE_CORE_ID 0x00018
76#define MALIDP_DE_DISPLAY_FUNC 0x00020
77
78/* these offsets are relative to MALIDP5x0_TIMINGS_BASE */
79#define MALIDP_DE_H_TIMINGS 0x0
80#define MALIDP_DE_V_TIMINGS 0x4
81#define MALIDP_DE_SYNC_WIDTH 0x8
82#define MALIDP_DE_HV_ACTIVE 0xc
83
84/* macros to set values into registers */
85#define MALIDP_DE_H_FRONTPORCH(x) (((x) & 0xfff) << 0)
86#define MALIDP_DE_H_BACKPORCH(x) (((x) & 0x3ff) << 16)
87#define MALIDP500_DE_V_FRONTPORCH(x) (((x) & 0xff) << 0)
88#define MALIDP550_DE_V_FRONTPORCH(x) (((x) & 0xfff) << 0)
89#define MALIDP_DE_V_BACKPORCH(x) (((x) & 0xff) << 16)
90#define MALIDP_DE_H_SYNCWIDTH(x) (((x) & 0x3ff) << 0)
91#define MALIDP_DE_V_SYNCWIDTH(x) (((x) & 0xff) << 16)
92#define MALIDP_DE_H_ACTIVE(x) (((x) & 0x1fff) << 0)
93#define MALIDP_DE_V_ACTIVE(x) (((x) & 0x1fff) << 16)
94
95/* register offsets and bits specific to DP500 */
96#define MALIDP500_DC_BASE 0x00000
97#define MALIDP500_DC_CONTROL 0x0000c
98#define MALIDP500_DC_CONFIG_REQ (1 << 17)
99#define MALIDP500_HSYNCPOL (1 << 20)
100#define MALIDP500_VSYNCPOL (1 << 21)
101#define MALIDP500_DC_CLEAR_MASK 0x300fff
102#define MALIDP500_DE_LINE_COUNTER 0x00010
103#define MALIDP500_DE_AXI_CONTROL 0x00014
104#define MALIDP500_DE_SECURE_CTRL 0x0001c
105#define MALIDP500_DE_CHROMA_KEY 0x00024
106#define MALIDP500_TIMINGS_BASE 0x00028
107
108#define MALIDP500_CONFIG_3D 0x00038
109#define MALIDP500_BGND_COLOR 0x0003c
110#define MALIDP500_OUTPUT_DEPTH 0x00044
111#define MALIDP500_YUV_RGB_COEF 0x00048
112#define MALIDP500_COLOR_ADJ_COEF 0x00078
113#define MALIDP500_COEF_TABLE_ADDR 0x000a8
114#define MALIDP500_COEF_TABLE_DATA 0x000ac
115#define MALIDP500_DE_LV_BASE 0x00100
116#define MALIDP500_DE_LV_PTR_BASE 0x00124
117#define MALIDP500_DE_LG1_BASE 0x00200
118#define MALIDP500_DE_LG1_PTR_BASE 0x0021c
119#define MALIDP500_DE_LG2_BASE 0x00300
120#define MALIDP500_DE_LG2_PTR_BASE 0x0031c
121#define MALIDP500_SE_BASE 0x00c00
122#define MALIDP500_SE_PTR_BASE 0x00e0c
123#define MALIDP500_DC_IRQ_BASE 0x00f00
124#define MALIDP500_CONFIG_VALID 0x00f00
125#define MALIDP500_CONFIG_ID 0x00fd4
126
127/* register offsets and bits specific to DP550/DP650 */
128#define MALIDP550_DE_CONTROL 0x00010
129#define MALIDP550_DE_LINE_COUNTER 0x00014
130#define MALIDP550_DE_AXI_CONTROL 0x00018
131#define MALIDP550_DE_QOS 0x0001c
132#define MALIDP550_TIMINGS_BASE 0x00030
133#define MALIDP550_HSYNCPOL (1 << 12)
134#define MALIDP550_VSYNCPOL (1 << 28)
135
136#define MALIDP550_DE_DISP_SIDEBAND 0x00040
137#define MALIDP550_DE_BGND_COLOR 0x00044
138#define MALIDP550_DE_OUTPUT_DEPTH 0x0004c
139#define MALIDP550_DE_COLOR_COEF 0x00050
140#define MALIDP550_DE_COEF_TABLE_ADDR 0x00080
141#define MALIDP550_DE_COEF_TABLE_DATA 0x00084
142#define MALIDP550_DE_LV1_BASE 0x00100
143#define MALIDP550_DE_LV1_PTR_BASE 0x00124
144#define MALIDP550_DE_LV2_BASE 0x00200
145#define MALIDP550_DE_LV2_PTR_BASE 0x00224
146#define MALIDP550_DE_LG_BASE 0x00300
147#define MALIDP550_DE_LG_PTR_BASE 0x0031c
148#define MALIDP550_DE_LS_BASE 0x00400
149#define MALIDP550_DE_LS_PTR_BASE 0x0042c
150#define MALIDP550_DE_PERF_BASE 0x00500
151#define MALIDP550_SE_BASE 0x08000
152#define MALIDP550_DC_BASE 0x0c000
153#define MALIDP550_DC_CONTROL 0x0c010
154#define MALIDP550_DC_CONFIG_REQ (1 << 16)
155#define MALIDP550_CONFIG_VALID 0x0c014
156#define MALIDP550_CONFIG_ID 0x0ffd4
157
158/*
159 * Starting with DP550 the register map blocks have been standardised to the
160 * following layout:
161 *
162 * Offset Block registers
163 * 0x00000 Display Engine
164 * 0x08000 Scaling Engine
165 * 0x0c000 Display Core
166 * 0x10000 Secure control
167 *
168 * The old DP500 IP mixes some of the DC registers in with the DE
169 * registers, hence the need for a mapping structure.
170 */
171
172#endif /* __MALIDP_REGS_H__ */
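
The standardised DP550/DP650 layout described above is what the driver's
malidp_get_block_base() helper encodes; the sketch below mirrors that dispatch
as a standalone program. The block IDs and offsets follow the headers, but
this is an illustration, not the driver code.

#include <stdint.h>
#include <stdio.h>

enum { DE_BLOCK, SE_BLOCK, DC_BLOCK };

static uint32_t block_base(int block)
{
	switch (block) {
	case SE_BLOCK:
		return 0x08000;	/* MALIDP550_SE_BASE */
	case DC_BLOCK:
		return 0x0c000;	/* MALIDP550_DC_BASE */
	default:
		return 0x00000;	/* DE registers live at the start of the map */
	}
}

int main(void)
{
	/* e.g. the DC block's STATUS register (MALIDP_REG_STATUS = 0x0) */
	printf("DC status reg @ 0x%05x\n",
	       (unsigned int)(block_base(DC_BLOCK) + 0x0));
	return 0;
}
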
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 3130aa8bcdd0..34405e4a5d36 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -199,7 +199,7 @@ static void armada_drm_plane_work_run(struct armada_crtc *dcrtc,
 	/* Handle any pending frame work. */
 	if (work) {
 		work->fn(dcrtc, plane, work);
-		drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+		drm_crtc_vblank_put(&dcrtc->crtc);
 	}
 
 	wake_up(&plane->frame_wait);
@@ -210,7 +210,7 @@ int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
 {
 	int ret;
 
-	ret = drm_vblank_get(dcrtc->crtc.dev, dcrtc->num);
+	ret = drm_crtc_vblank_get(&dcrtc->crtc);
 	if (ret) {
 		DRM_ERROR("failed to acquire vblank counter\n");
 		return ret;
@@ -218,7 +218,7 @@ int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
 
 	ret = cmpxchg(&plane->work, NULL, work) ? -EBUSY : 0;
 	if (ret)
-		drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+		drm_crtc_vblank_put(&dcrtc->crtc);
 
 	return ret;
 }
@@ -234,7 +234,7 @@ struct armada_plane_work *armada_drm_plane_work_cancel(
 	struct armada_plane_work *work = xchg(&plane->work, NULL);
 
 	if (work)
-		drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+		drm_crtc_vblank_put(&dcrtc->crtc);
 
 	return work;
 }
@@ -260,7 +260,7 @@ static void armada_drm_crtc_complete_frame_work(struct armada_crtc *dcrtc,
 
 	if (fwork->event) {
 		spin_lock_irqsave(&dev->event_lock, flags);
-		drm_send_vblank_event(dev, dcrtc->num, fwork->event);
+		drm_crtc_send_vblank_event(&dcrtc->crtc, fwork->event);
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 	}
 
@@ -592,9 +592,9 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
 
 	if (interlaced ^ dcrtc->interlaced) {
 		if (adj->flags & DRM_MODE_FLAG_INTERLACE)
-			drm_vblank_get(dcrtc->crtc.dev, dcrtc->num);
+			drm_crtc_vblank_get(&dcrtc->crtc);
 		else
-			drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+			drm_crtc_vblank_put(&dcrtc->crtc);
 		dcrtc->interlaced = interlaced;
 	}
 
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 439824a61aa5..f5ebdd681445 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -189,7 +189,6 @@ static struct drm_driver armada_drm_driver = {
 	.load = armada_drm_load,
 	.lastclose = armada_drm_lastclose,
 	.unload = armada_drm_unload,
-	.set_busid = drm_platform_set_busid,
 	.get_vblank_counter = drm_vblank_no_hw_counter,
 	.enable_vblank = armada_drm_enable_vblank,
 	.disable_vblank = armada_drm_disable_vblank,
@@ -197,7 +196,7 @@ static struct drm_driver armada_drm_driver = {
 	.debugfs_init = armada_drm_debugfs_init,
 	.debugfs_cleanup = armada_drm_debugfs_cleanup,
 #endif
-	.gem_free_object = armada_gem_free_object,
+	.gem_free_object_unlocked = armada_gem_free_object,
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 	.gem_prime_export = armada_gem_prime_export,
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index 148e8a42b2c6..1ee707ef6b8d 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -121,6 +121,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
 	int ret;
 
 	ret = drm_plane_helper_check_update(plane, crtc, fb, &src, &dest, &clip,
+					    BIT(DRM_ROTATE_0),
 					    0, INT_MAX, true, false, &visible);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index fcd9c0714836..f54afd2113a9 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -209,7 +209,7 @@ static struct drm_driver driver = {
 	.minor = DRIVER_MINOR,
 	.patchlevel = DRIVER_PATCHLEVEL,
 
-	.gem_free_object = ast_gem_free_object,
+	.gem_free_object_unlocked = ast_gem_free_object,
 	.dumb_create = ast_dumb_create,
 	.dumb_map_offset = ast_dumb_mmap_offset,
 	.dumb_destroy = drm_gem_dumb_destroy,
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index 5320f8c57884..c017a9330a18 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -167,12 +167,9 @@ static int astfb_create_object(struct ast_fbdev *afbdev,
 			       struct drm_gem_object **gobj_p)
 {
 	struct drm_device *dev = afbdev->helper.dev;
-	u32 bpp, depth;
 	u32 size;
 	struct drm_gem_object *gobj;
-
 	int ret = 0;
-	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
 
 	size = mode_cmd->pitches[0] * mode_cmd->height;
 	ret = ast_gem_create(dev, size, true, &gobj);
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index c337922606e3..5957c3e659fe 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -624,19 +624,21 @@ static void ast_crtc_reset(struct drm_crtc *crtc)
 
 }
 
-static void ast_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
-			       u16 *blue, uint32_t start, uint32_t size)
+static int ast_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+			      u16 *blue, uint32_t size)
 {
 	struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
-	int end = (start + size > 256) ? 256 : start + size, i;
+	int i;
 
 	/* userspace palettes are always correct as is */
-	for (i = start; i < end; i++) {
+	for (i = 0; i < size; i++) {
 		ast_crtc->lut_r[i] = red[i] >> 8;
 		ast_crtc->lut_g[i] = green[i] >> 8;
 		ast_crtc->lut_b[i] = blue[i] >> 8;
 	}
 	ast_crtc_load_lut(crtc);
+
+	return 0;
 }
 
 
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index cf23a755f777..613f6c99b76a 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -374,8 +374,8 @@ static void atmel_hlcdc_crtc_finish_page_flip(struct atmel_hlcdc_crtc *crtc)
 
 	spin_lock_irqsave(&dev->event_lock, flags);
 	if (crtc->event) {
-		drm_send_vblank_event(dev, crtc->id, crtc->event);
-		drm_vblank_put(dev, crtc->id);
+		drm_crtc_send_vblank_event(&crtc->base, crtc->event);
+		drm_crtc_vblank_put(&crtc->base);
 		crtc->event = NULL;
 	}
 	spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -391,12 +391,11 @@ void atmel_hlcdc_crtc_reset(struct drm_crtc *crtc)
 {
 	struct atmel_hlcdc_crtc_state *state;
 
-	if (crtc->state && crtc->state->mode_blob)
-		drm_property_unreference_blob(crtc->state->mode_blob);
-
 	if (crtc->state) {
+		__drm_atomic_helper_crtc_destroy_state(crtc->state);
 		state = drm_crtc_state_to_atmel_hlcdc_crtc_state(crtc->state);
 		kfree(state);
+		crtc->state = NULL;
 	}
 
 	state = kzalloc(sizeof(*state), GFP_KERNEL);
@@ -415,8 +414,9 @@ atmel_hlcdc_crtc_duplicate_state(struct drm_crtc *crtc)
 		return NULL;
 
 	state = kmalloc(sizeof(*state), GFP_KERNEL);
-	if (state)
-		__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
+	if (!state)
+		return NULL;
+	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
 
 	cur = drm_crtc_state_to_atmel_hlcdc_crtc_state(crtc->state);
 	state->output_mode = cur->output_mode;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 8ded7645747e..d4a3d61b7b06 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -519,7 +519,7 @@ static int atmel_hlcdc_dc_atomic_commit(struct drm_device *dev,
 	}
 
 	/* Swap the state, this is the point of no return. */
-	drm_atomic_helper_swap_state(dev, state);
+	drm_atomic_helper_swap_state(state, true);
 
 	if (async)
 		queue_work(dc->wq, &commit->work);
@@ -691,13 +691,6 @@ static void atmel_hlcdc_dc_unload(struct drm_device *dev)
 	destroy_workqueue(dc->wq);
 }
 
-static void atmel_hlcdc_dc_connector_unplug_all(struct drm_device *dev)
-{
-	mutex_lock(&dev->mode_config.mutex);
-	drm_connector_unregister_all(dev);
-	mutex_unlock(&dev->mode_config.mutex);
-}
-
 static void atmel_hlcdc_dc_lastclose(struct drm_device *dev)
 {
 	struct atmel_hlcdc_dc *dc = dev->dev_private;
@@ -776,7 +769,7 @@ static struct drm_driver atmel_hlcdc_dc_driver = {
 	.get_vblank_counter = drm_vblank_no_hw_counter,
 	.enable_vblank = atmel_hlcdc_dc_enable_vblank,
 	.disable_vblank = atmel_hlcdc_dc_disable_vblank,
-	.gem_free_object = drm_gem_cma_free_object,
+	.gem_free_object_unlocked = drm_gem_cma_free_object,
 	.gem_vm_ops = &drm_gem_cma_vm_ops,
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
@@ -815,15 +808,8 @@ static int atmel_hlcdc_dc_drm_probe(struct platform_device *pdev)
 	if (ret)
 		goto err_unload;
 
-	ret = drm_connector_register_all(ddev);
-	if (ret)
-		goto err_unregister;
-
 	return 0;
 
-err_unregister:
-	drm_dev_unregister(ddev);
-
 err_unload:
 	atmel_hlcdc_dc_unload(ddev);
 
@@ -837,7 +823,6 @@ static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev)
 {
 	struct drm_device *ddev = platform_get_drvdata(pdev);
 
-	atmel_hlcdc_dc_connector_unplug_all(ddev);
 	drm_dev_unregister(ddev);
 	atmel_hlcdc_dc_unload(ddev);
 	drm_dev_unref(ddev);
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index 39802c0539b6..6119b5085501 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -113,21 +113,9 @@ static int atmel_hlcdc_rgb_mode_valid(struct drm_connector *connector,
 	return atmel_hlcdc_dc_mode_valid(rgb->dc, mode);
 }
 
-
-
-static struct drm_encoder *
-atmel_hlcdc_rgb_best_encoder(struct drm_connector *connector)
-{
-	struct atmel_hlcdc_rgb_output *rgb =
-		drm_connector_to_atmel_hlcdc_rgb_output(connector);
-
-	return &rgb->encoder;
-}
-
 static const struct drm_connector_helper_funcs atmel_hlcdc_panel_connector_helper_funcs = {
 	.get_modes = atmel_hlcdc_panel_get_modes,
 	.mode_valid = atmel_hlcdc_rgb_mode_valid,
-	.best_encoder = atmel_hlcdc_rgb_best_encoder,
 };
 
 static enum drm_connector_status
@@ -266,9 +254,10 @@ int atmel_hlcdc_create_outputs(struct drm_device *dev)
 		if (!ret)
 			ret = atmel_hlcdc_check_endpoint(dev, &ep);
 
-		of_node_put(ep_np);
-		if (ret)
-			return ret;
+		if (ret) {
+			of_node_put(ep_np);
+			return ret;
+		}
 	}
 
 	for_each_endpoint_of_node(dev->dev->of_node, ep_np) {
@@ -276,9 +265,10 @@ int atmel_hlcdc_create_outputs(struct drm_device *dev)
 		if (!ret)
 			ret = atmel_hlcdc_attach_endpoint(dev, &ep);
 
-		of_node_put(ep_np);
-		if (ret)
-			return ret;
+		if (ret) {
+			of_node_put(ep_np);
+			return ret;
+		}
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index aef3ca8a81fa..016c191221f3 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -339,6 +339,8 @@ atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane,
 
 		atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff,
 					     factor_reg);
+	} else {
+		atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff, 0);
 	}
 }
344 346
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index b332b4d3b0e2..abace82de6ea 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -89,7 +89,7 @@ static struct drm_driver bochs_driver = {
 	.date = "20130925",
 	.major = 1,
 	.minor = 0,
-	.gem_free_object = bochs_gem_free_object,
+	.gem_free_object_unlocked = bochs_gem_free_object,
 	.dumb_create = bochs_dumb_create,
 	.dumb_map_offset = bochs_dumb_mmap_offset,
 	.dumb_destroy = drm_gem_dumb_destroy,
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 8f7423f18da5..a141921445f4 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -50,6 +50,14 @@ config DRM_PARADE_PS8622
 	---help---
 	  Parade eDP-LVDS bridge chip driver.
 
+config DRM_SII902X
+	tristate "Silicon Image sii902x RGB/HDMI bridge"
+	depends on OF
+	select DRM_KMS_HELPER
+	select REGMAP_I2C
+	---help---
+	  Silicon Image sii902x bridge chip driver.
+
 source "drivers/gpu/drm/bridge/analogix/Kconfig"
 
 endmenu
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 96b13b30e6ab..bfec9f8cb9d2 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -5,4 +5,5 @@ obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o
 obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o
 obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
 obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
+obj-$(CONFIG_DRM_SII902X) += sii902x.o
 obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/
diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c
index d087b054c360..f9f03bcba0af 100644
--- a/drivers/gpu/drm/bridge/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix-anx78xx.c
@@ -986,16 +986,8 @@ unlock:
 	return num_modes;
 }
 
-static struct drm_encoder *anx78xx_best_encoder(struct drm_connector *connector)
-{
-	struct anx78xx *anx78xx = connector_to_anx78xx(connector);
-
-	return anx78xx->bridge.encoder;
-}
-
 static const struct drm_connector_helper_funcs anx78xx_connector_helper_funcs = {
 	.get_modes = anx78xx_get_modes,
-	.best_encoder = anx78xx_best_encoder,
 };
 
 static enum drm_connector_status anx78xx_detect(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/bridge/dw-hdmi.c b/drivers/gpu/drm/bridge/dw-hdmi.c
index c9d941283d30..70b1f7d4270b 100644
--- a/drivers/gpu/drm/bridge/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/dw-hdmi.c
@@ -1476,15 +1476,6 @@ dw_hdmi_connector_mode_valid(struct drm_connector *connector,
 	return mode_status;
 }
 
-static struct drm_encoder *dw_hdmi_connector_best_encoder(struct drm_connector
-							   *connector)
-{
-	struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
-					    connector);
-
-	return hdmi->encoder;
-}
-
 static void dw_hdmi_connector_destroy(struct drm_connector *connector)
 {
 	drm_connector_unregister(connector);
@@ -1525,7 +1516,7 @@ static const struct drm_connector_funcs dw_hdmi_atomic_connector_funcs = {
 static const struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = {
 	.get_modes = dw_hdmi_connector_get_modes,
 	.mode_valid = dw_hdmi_connector_mode_valid,
-	.best_encoder = dw_hdmi_connector_best_encoder,
+	.best_encoder = drm_atomic_helper_best_encoder,
 };
 
 static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = {
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 7ecd59f70b8e..93f3dacf9e27 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -235,16 +235,8 @@ out:
 	return num_modes;
 }
 
-static struct drm_encoder *ptn3460_best_encoder(struct drm_connector *connector)
-{
-	struct ptn3460_bridge *ptn_bridge = connector_to_ptn3460(connector);
-
-	return ptn_bridge->bridge.encoder;
-}
-
 static const struct drm_connector_helper_funcs ptn3460_connector_helper_funcs = {
 	.get_modes = ptn3460_get_modes,
-	.best_encoder = ptn3460_best_encoder,
 };
 
 static enum drm_connector_status ptn3460_detect(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index be881e9fef8f..5cd8dd7e5904 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -474,18 +474,8 @@ static int ps8622_get_modes(struct drm_connector *connector)
 	return drm_panel_get_modes(ps8622->panel);
 }
 
-static struct drm_encoder *ps8622_best_encoder(struct drm_connector *connector)
-{
-	struct ps8622_bridge *ps8622;
-
-	ps8622 = connector_to_ps8622(connector);
-
-	return ps8622->bridge.encoder;
-}
-
 static const struct drm_connector_helper_funcs ps8622_connector_helper_funcs = {
 	.get_modes = ps8622_get_modes,
-	.best_encoder = ps8622_best_encoder,
 };
 
 static enum drm_connector_status ps8622_detect(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
new file mode 100644
index 000000000000..9126d0306ab5
--- /dev/null
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -0,0 +1,467 @@
1/*
2 * Copyright (C) 2016 Atmel
3 * Bo Shen <voice.shen@atmel.com>
4 *
5 * Authors: Bo Shen <voice.shen@atmel.com>
6 * Boris Brezillon <boris.brezillon@free-electrons.com>
7 * Wu, Songjun <Songjun.Wu@atmel.com>
8 *
9 *
10 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. All Rights Reserved.
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 */
22
23#include <linux/gpio/consumer.h>
24#include <linux/i2c.h>
25#include <linux/module.h>
26#include <linux/regmap.h>
27
28#include <drm/drmP.h>
29#include <drm/drm_atomic_helper.h>
30#include <drm/drm_crtc_helper.h>
31#include <drm/drm_edid.h>
32
33#define SII902X_TPI_VIDEO_DATA 0x0
34
35#define SII902X_TPI_PIXEL_REPETITION 0x8
36#define SII902X_TPI_AVI_PIXEL_REP_BUS_24BIT BIT(5)
37#define SII902X_TPI_AVI_PIXEL_REP_RISING_EDGE BIT(4)
38#define SII902X_TPI_AVI_PIXEL_REP_4X 3
39#define SII902X_TPI_AVI_PIXEL_REP_2X 1
40#define SII902X_TPI_AVI_PIXEL_REP_NONE 0
41#define SII902X_TPI_CLK_RATIO_HALF (0 << 6)
42#define SII902X_TPI_CLK_RATIO_1X (1 << 6)
43#define SII902X_TPI_CLK_RATIO_2X (2 << 6)
44#define SII902X_TPI_CLK_RATIO_4X (3 << 6)
45
46#define SII902X_TPI_AVI_IN_FORMAT 0x9
47#define SII902X_TPI_AVI_INPUT_BITMODE_12BIT BIT(7)
48#define SII902X_TPI_AVI_INPUT_DITHER BIT(6)
49#define SII902X_TPI_AVI_INPUT_RANGE_LIMITED (2 << 2)
50#define SII902X_TPI_AVI_INPUT_RANGE_FULL (1 << 2)
51#define SII902X_TPI_AVI_INPUT_RANGE_AUTO (0 << 2)
52#define SII902X_TPI_AVI_INPUT_COLORSPACE_BLACK (3 << 0)
53#define SII902X_TPI_AVI_INPUT_COLORSPACE_YUV422 (2 << 0)
54#define SII902X_TPI_AVI_INPUT_COLORSPACE_YUV444 (1 << 0)
55#define SII902X_TPI_AVI_INPUT_COLORSPACE_RGB (0 << 0)
56
57#define SII902X_TPI_AVI_INFOFRAME 0x0c
58
59#define SII902X_SYS_CTRL_DATA 0x1a
60#define SII902X_SYS_CTRL_PWR_DWN BIT(4)
61#define SII902X_SYS_CTRL_AV_MUTE BIT(3)
62#define SII902X_SYS_CTRL_DDC_BUS_REQ BIT(2)
63#define SII902X_SYS_CTRL_DDC_BUS_GRTD BIT(1)
64#define SII902X_SYS_CTRL_OUTPUT_MODE BIT(0)
65#define SII902X_SYS_CTRL_OUTPUT_HDMI 1
66#define SII902X_SYS_CTRL_OUTPUT_DVI 0
67
68#define SII902X_REG_CHIPID(n) (0x1b + (n))
69
70#define SII902X_PWR_STATE_CTRL 0x1e
71#define SII902X_AVI_POWER_STATE_MSK GENMASK(1, 0)
72#define SII902X_AVI_POWER_STATE_D(l) ((l) & SII902X_AVI_POWER_STATE_MSK)
73
74#define SII902X_INT_ENABLE 0x3c
75#define SII902X_INT_STATUS 0x3d
76#define SII902X_HOTPLUG_EVENT BIT(0)
77#define SII902X_PLUGGED_STATUS BIT(2)
78
79#define SII902X_REG_TPI_RQB 0xc7
80
81#define SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS 500
82
83struct sii902x {
84 struct i2c_client *i2c;
85 struct regmap *regmap;
86 struct drm_bridge bridge;
87 struct drm_connector connector;
88 struct gpio_desc *reset_gpio;
89};
90
91static inline struct sii902x *bridge_to_sii902x(struct drm_bridge *bridge)
92{
93 return container_of(bridge, struct sii902x, bridge);
94}
95
96static inline struct sii902x *connector_to_sii902x(struct drm_connector *con)
97{
98 return container_of(con, struct sii902x, connector);
99}
100
101static void sii902x_reset(struct sii902x *sii902x)
102{
103 if (!sii902x->reset_gpio)
104 return;
105
106 gpiod_set_value(sii902x->reset_gpio, 1);
107
108 /* The datasheet says treset-min = 100us. Make it 150us to be sure. */
109 usleep_range(150, 200);
110
111 gpiod_set_value(sii902x->reset_gpio, 0);
112}
113
114static enum drm_connector_status
115sii902x_connector_detect(struct drm_connector *connector, bool force)
116{
117 struct sii902x *sii902x = connector_to_sii902x(connector);
118 unsigned int status;
119
120 regmap_read(sii902x->regmap, SII902X_INT_STATUS, &status);
121
122 return (status & SII902X_PLUGGED_STATUS) ?
123 connector_status_connected : connector_status_disconnected;
124}
125
126static const struct drm_connector_funcs sii902x_connector_funcs = {
127 .dpms = drm_atomic_helper_connector_dpms,
128 .detect = sii902x_connector_detect,
129 .fill_modes = drm_helper_probe_single_connector_modes,
130 .destroy = drm_connector_cleanup,
131 .reset = drm_atomic_helper_connector_reset,
132 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
133 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
134};
135
136static int sii902x_get_modes(struct drm_connector *connector)
137{
138 struct sii902x *sii902x = connector_to_sii902x(connector);
139 struct regmap *regmap = sii902x->regmap;
140 u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
141 unsigned long timeout;
142 unsigned int status;
143 struct edid *edid;
144 int num = 0;
145 int ret;
146
147 ret = regmap_update_bits(regmap, SII902X_SYS_CTRL_DATA,
148 SII902X_SYS_CTRL_DDC_BUS_REQ,
149 SII902X_SYS_CTRL_DDC_BUS_REQ);
150 if (ret)
151 return ret;
152
153 timeout = jiffies +
154 msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS);
155 do {
156 ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status);
157 if (ret)
158 return ret;
159 } while (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD) &&
160 time_before(jiffies, timeout));
161
162 if (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD)) {
 163		dev_err(&sii902x->i2c->dev, "failed to acquire the i2c bus\n");
164 return -ETIMEDOUT;
165 }
166
167 ret = regmap_write(regmap, SII902X_SYS_CTRL_DATA, status);
168 if (ret)
169 return ret;
170
171 edid = drm_get_edid(connector, sii902x->i2c->adapter);
172 drm_mode_connector_update_edid_property(connector, edid);
173 if (edid) {
174 num = drm_add_edid_modes(connector, edid);
175 kfree(edid);
176 }
177
178 ret = drm_display_info_set_bus_formats(&connector->display_info,
179 &bus_format, 1);
180 if (ret)
181 return ret;
182
183 ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status);
184 if (ret)
185 return ret;
186
187 ret = regmap_update_bits(regmap, SII902X_SYS_CTRL_DATA,
188 SII902X_SYS_CTRL_DDC_BUS_REQ |
189 SII902X_SYS_CTRL_DDC_BUS_GRTD, 0);
190 if (ret)
191 return ret;
192
193 timeout = jiffies +
194 msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS);
195 do {
196 ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status);
197 if (ret)
198 return ret;
199 } while (status & (SII902X_SYS_CTRL_DDC_BUS_REQ |
200 SII902X_SYS_CTRL_DDC_BUS_GRTD) &&
201 time_before(jiffies, timeout));
202
203 if (status & (SII902X_SYS_CTRL_DDC_BUS_REQ |
204 SII902X_SYS_CTRL_DDC_BUS_GRTD)) {
 205		dev_err(&sii902x->i2c->dev, "failed to release the i2c bus\n");
206 return -ETIMEDOUT;
207 }
208
209 return num;
210}
211
212static enum drm_mode_status sii902x_mode_valid(struct drm_connector *connector,
213 struct drm_display_mode *mode)
214{
215 /* TODO: check mode */
216
217 return MODE_OK;
218}
219
220static const struct drm_connector_helper_funcs sii902x_connector_helper_funcs = {
221 .get_modes = sii902x_get_modes,
222 .mode_valid = sii902x_mode_valid,
223};
224
225static void sii902x_bridge_disable(struct drm_bridge *bridge)
226{
227 struct sii902x *sii902x = bridge_to_sii902x(bridge);
228
229 regmap_update_bits(sii902x->regmap, SII902X_SYS_CTRL_DATA,
230 SII902X_SYS_CTRL_PWR_DWN,
231 SII902X_SYS_CTRL_PWR_DWN);
232}
233
234static void sii902x_bridge_enable(struct drm_bridge *bridge)
235{
236 struct sii902x *sii902x = bridge_to_sii902x(bridge);
237
238 regmap_update_bits(sii902x->regmap, SII902X_PWR_STATE_CTRL,
239 SII902X_AVI_POWER_STATE_MSK,
240 SII902X_AVI_POWER_STATE_D(0));
241 regmap_update_bits(sii902x->regmap, SII902X_SYS_CTRL_DATA,
242 SII902X_SYS_CTRL_PWR_DWN, 0);
243}
244
245static void sii902x_bridge_mode_set(struct drm_bridge *bridge,
246 struct drm_display_mode *mode,
247 struct drm_display_mode *adj)
248{
249 struct sii902x *sii902x = bridge_to_sii902x(bridge);
250 struct regmap *regmap = sii902x->regmap;
251 u8 buf[HDMI_INFOFRAME_SIZE(AVI)];
252 struct hdmi_avi_infoframe frame;
253 int ret;
254
255 buf[0] = adj->clock;
256 buf[1] = adj->clock >> 8;
257 buf[2] = adj->vrefresh;
258 buf[3] = 0x00;
259 buf[4] = adj->hdisplay;
260 buf[5] = adj->hdisplay >> 8;
261 buf[6] = adj->vdisplay;
262 buf[7] = adj->vdisplay >> 8;
263 buf[8] = SII902X_TPI_CLK_RATIO_1X | SII902X_TPI_AVI_PIXEL_REP_NONE |
264 SII902X_TPI_AVI_PIXEL_REP_BUS_24BIT;
265 buf[9] = SII902X_TPI_AVI_INPUT_RANGE_AUTO |
266 SII902X_TPI_AVI_INPUT_COLORSPACE_RGB;
267
268 ret = regmap_bulk_write(regmap, SII902X_TPI_VIDEO_DATA, buf, 10);
269 if (ret)
270 return;
271
272 ret = drm_hdmi_avi_infoframe_from_display_mode(&frame, adj);
273 if (ret < 0) {
274 DRM_ERROR("couldn't fill AVI infoframe\n");
275 return;
276 }
277
278 ret = hdmi_avi_infoframe_pack(&frame, buf, sizeof(buf));
279 if (ret < 0) {
280 DRM_ERROR("failed to pack AVI infoframe: %d\n", ret);
281 return;
282 }
283
284 /* Do not send the infoframe header, but keep the CRC field. */
285 regmap_bulk_write(regmap, SII902X_TPI_AVI_INFOFRAME,
286 buf + HDMI_INFOFRAME_HEADER_SIZE - 1,
287 HDMI_AVI_INFOFRAME_SIZE + 1);
288}
289
290static int sii902x_bridge_attach(struct drm_bridge *bridge)
291{
292 struct sii902x *sii902x = bridge_to_sii902x(bridge);
293 struct drm_device *drm = bridge->dev;
294 int ret;
295
296 drm_connector_helper_add(&sii902x->connector,
297 &sii902x_connector_helper_funcs);
298
299 if (!drm_core_check_feature(drm, DRIVER_ATOMIC)) {
300 dev_err(&sii902x->i2c->dev,
301 "sii902x driver is only compatible with DRM devices supporting atomic updates");
302 return -ENOTSUPP;
303 }
304
305 ret = drm_connector_init(drm, &sii902x->connector,
306 &sii902x_connector_funcs,
307 DRM_MODE_CONNECTOR_HDMIA);
308 if (ret)
309 return ret;
310
311 if (sii902x->i2c->irq > 0)
312 sii902x->connector.polled = DRM_CONNECTOR_POLL_HPD;
313 else
314 sii902x->connector.polled = DRM_CONNECTOR_POLL_CONNECT;
315
316 drm_mode_connector_attach_encoder(&sii902x->connector, bridge->encoder);
317
318 return 0;
319}
320
321static const struct drm_bridge_funcs sii902x_bridge_funcs = {
322 .attach = sii902x_bridge_attach,
323 .mode_set = sii902x_bridge_mode_set,
324 .disable = sii902x_bridge_disable,
325 .enable = sii902x_bridge_enable,
326};
327
328static const struct regmap_range sii902x_volatile_ranges[] = {
329 { .range_min = 0, .range_max = 0xff },
330};
331
332static const struct regmap_access_table sii902x_volatile_table = {
333 .yes_ranges = sii902x_volatile_ranges,
334 .n_yes_ranges = ARRAY_SIZE(sii902x_volatile_ranges),
335};
336
337static const struct regmap_config sii902x_regmap_config = {
338 .reg_bits = 8,
339 .val_bits = 8,
340 .volatile_table = &sii902x_volatile_table,
341 .cache_type = REGCACHE_NONE,
342};
343
344static irqreturn_t sii902x_interrupt(int irq, void *data)
345{
346 struct sii902x *sii902x = data;
347 unsigned int status = 0;
348
349 regmap_read(sii902x->regmap, SII902X_INT_STATUS, &status);
350 regmap_write(sii902x->regmap, SII902X_INT_STATUS, status);
351
352 if ((status & SII902X_HOTPLUG_EVENT) && sii902x->bridge.dev)
353 drm_helper_hpd_irq_event(sii902x->bridge.dev);
354
355 return IRQ_HANDLED;
356}
357
358static int sii902x_probe(struct i2c_client *client,
359 const struct i2c_device_id *id)
360{
361 struct device *dev = &client->dev;
362 unsigned int status = 0;
363 struct sii902x *sii902x;
364 u8 chipid[4];
365 int ret;
366
367 sii902x = devm_kzalloc(dev, sizeof(*sii902x), GFP_KERNEL);
368 if (!sii902x)
369 return -ENOMEM;
370
371 sii902x->i2c = client;
372 sii902x->regmap = devm_regmap_init_i2c(client, &sii902x_regmap_config);
373 if (IS_ERR(sii902x->regmap))
374 return PTR_ERR(sii902x->regmap);
375
376 sii902x->reset_gpio = devm_gpiod_get_optional(dev, "reset",
377 GPIOD_OUT_LOW);
378 if (IS_ERR(sii902x->reset_gpio)) {
379 dev_err(dev, "Failed to retrieve/request reset gpio: %ld\n",
380 PTR_ERR(sii902x->reset_gpio));
381 return PTR_ERR(sii902x->reset_gpio);
382 }
383
384 sii902x_reset(sii902x);
385
386 ret = regmap_write(sii902x->regmap, SII902X_REG_TPI_RQB, 0x0);
387 if (ret)
388 return ret;
389
390 ret = regmap_bulk_read(sii902x->regmap, SII902X_REG_CHIPID(0),
391 &chipid, 4);
392 if (ret) {
393 dev_err(dev, "regmap_read failed %d\n", ret);
394 return ret;
395 }
396
397 if (chipid[0] != 0xb0) {
398 dev_err(dev, "Invalid chipid: %02x (expecting 0xb0)\n",
399 chipid[0]);
400 return -EINVAL;
401 }
402
403 /* Clear all pending interrupts */
404 regmap_read(sii902x->regmap, SII902X_INT_STATUS, &status);
405 regmap_write(sii902x->regmap, SII902X_INT_STATUS, status);
406
407 if (client->irq > 0) {
408 regmap_write(sii902x->regmap, SII902X_INT_ENABLE,
409 SII902X_HOTPLUG_EVENT);
410
411 ret = devm_request_threaded_irq(dev, client->irq, NULL,
412 sii902x_interrupt,
413 IRQF_ONESHOT, dev_name(dev),
414 sii902x);
415 if (ret)
416 return ret;
417 }
418
419 sii902x->bridge.funcs = &sii902x_bridge_funcs;
420 sii902x->bridge.of_node = dev->of_node;
421 ret = drm_bridge_add(&sii902x->bridge);
422 if (ret) {
423 dev_err(dev, "Failed to add drm_bridge\n");
424 return ret;
425 }
426
427 i2c_set_clientdata(client, sii902x);
428
429 return 0;
430}
431
 432static int sii902x_remove(struct i2c_client *client)
 433{
435 struct sii902x *sii902x = i2c_get_clientdata(client);
436
437 drm_bridge_remove(&sii902x->bridge);
438
439 return 0;
440}
441
442static const struct of_device_id sii902x_dt_ids[] = {
443 { .compatible = "sil,sii9022", },
444 { }
445};
446MODULE_DEVICE_TABLE(of, sii902x_dt_ids);
447
448static const struct i2c_device_id sii902x_i2c_ids[] = {
449 { "sii9022", 0 },
450 { },
451};
452MODULE_DEVICE_TABLE(i2c, sii902x_i2c_ids);
453
454static struct i2c_driver sii902x_driver = {
455 .probe = sii902x_probe,
456 .remove = sii902x_remove,
457 .driver = {
458 .name = "sii902x",
459 .of_match_table = sii902x_dt_ids,
460 },
461 .id_table = sii902x_i2c_ids,
462};
463module_i2c_driver(sii902x_driver);
464
465MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
466MODULE_DESCRIPTION("SII902x RGB -> HDMI bridges");
467MODULE_LICENSE("GPL");
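
For context, a rough sketch of how a KMS driver of this era would consume the bridge registered above: sii902x_probe() publishes the bridge via drm_bridge_add(), so the display controller looks it up by its device-tree node and ties it to an encoder. A minimal sketch against the 4.7 bridge API; the my_* and bridge_np names are assumptions, not part of this patch.

#include <drm/drmP.h>
#include <drm/drm_crtc.h>

static int my_encoder_attach_bridge(struct drm_device *drm,
				    struct drm_encoder *encoder,
				    struct device_node *bridge_np)
{
	struct drm_bridge *bridge;

	/* Registered by sii902x_probe() through drm_bridge_add(). */
	bridge = of_drm_find_bridge(bridge_np);
	if (!bridge)
		return -EPROBE_DEFER;	/* i2c device not bound yet */

	encoder->bridge = bridge;
	bridge->encoder = encoder;

	/* Ends up in sii902x_bridge_attach(), which creates the connector. */
	return drm_bridge_attach(drm, bridge);
}
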
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index dc83f69da6f1..b05f7eae32ce 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -142,7 +142,7 @@ static struct drm_driver driver = {
142 .major = DRIVER_MAJOR, 142 .major = DRIVER_MAJOR,
143 .minor = DRIVER_MINOR, 143 .minor = DRIVER_MINOR,
144 .patchlevel = DRIVER_PATCHLEVEL, 144 .patchlevel = DRIVER_PATCHLEVEL,
145 .gem_free_object = cirrus_gem_free_object, 145 .gem_free_object_unlocked = cirrus_gem_free_object,
146 .dumb_create = cirrus_dumb_create, 146 .dumb_create = cirrus_dumb_create,
147 .dumb_map_offset = cirrus_dumb_mmap_offset, 147 .dumb_map_offset = cirrus_dumb_mmap_offset,
148 .dumb_destroy = drm_gem_dumb_destroy, 148 .dumb_destroy = drm_gem_dumb_destroy,
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index d3d8d7bfcc57..17c915d9a03e 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -325,21 +325,20 @@ static void cirrus_crtc_commit(struct drm_crtc *crtc)
325 * use this for 8-bit mode so can't perform smooth fades on deeper modes, 325 * use this for 8-bit mode so can't perform smooth fades on deeper modes,
326 * but it's a requirement that we provide the function 326 * but it's a requirement that we provide the function
327 */ 327 */
328static void cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 328static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
329 u16 *blue, uint32_t start, uint32_t size) 329 u16 *blue, uint32_t size)
330{ 330{
331 struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc); 331 struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
332 int i; 332 int i;
333 333
334 if (size != CIRRUS_LUT_SIZE) 334 for (i = 0; i < size; i++) {
335 return;
336
337 for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
338 cirrus_crtc->lut_r[i] = red[i]; 335 cirrus_crtc->lut_r[i] = red[i];
339 cirrus_crtc->lut_g[i] = green[i]; 336 cirrus_crtc->lut_g[i] = green[i];
340 cirrus_crtc->lut_b[i] = blue[i]; 337 cirrus_crtc->lut_b[i] = blue[i];
341 } 338 }
342 cirrus_crtc_load_lut(crtc); 339 cirrus_crtc_load_lut(crtc);
340
341 return 0;
343} 342}
344 343
345/* Simple cleanup function */ 344/* Simple cleanup function */
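
The cirrus hunk above tracks a core interface change: the .gamma_set hook of struct drm_crtc_funcs now returns an int and drops the start offset, with the core validating the table size against crtc->gamma_size before calling in. A minimal sketch of the new prototype, assuming a hypothetical foo driver and a foo_write_lut() hardware accessor:

/* assumed hardware accessor, not a real API */
static void foo_write_lut(struct drm_crtc *crtc, int i, u16 r, u16 g, u16 b);

static int foo_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
			      u16 *blue, uint32_t size)
{
	int i;

	/* the ioctl already checked size == crtc->gamma_size */
	for (i = 0; i < size; i++)
		foo_write_lut(crtc, i, red[i], green[i], blue[i]);

	return 0;
}

static const struct drm_crtc_funcs foo_crtc_funcs = {
	.gamma_set = foo_crtc_gamma_set,
	/* ... */
};
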
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 3ff1ed7b33db..3cee084e9d28 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -33,6 +33,20 @@
33 33
34#include "drm_crtc_internal.h" 34#include "drm_crtc_internal.h"
35 35
36static void crtc_commit_free(struct kref *kref)
37{
38 struct drm_crtc_commit *commit =
39 container_of(kref, struct drm_crtc_commit, ref);
40
41 kfree(commit);
42}
43
44void drm_crtc_commit_put(struct drm_crtc_commit *commit)
45{
46 kref_put(&commit->ref, crtc_commit_free);
47}
48EXPORT_SYMBOL(drm_crtc_commit_put);
49
36/** 50/**
37 * drm_atomic_state_default_release - 51 * drm_atomic_state_default_release -
38 * release memory initialized by drm_atomic_state_init 52 * release memory initialized by drm_atomic_state_init
@@ -44,11 +58,8 @@
44void drm_atomic_state_default_release(struct drm_atomic_state *state) 58void drm_atomic_state_default_release(struct drm_atomic_state *state)
45{ 59{
46 kfree(state->connectors); 60 kfree(state->connectors);
47 kfree(state->connector_states);
48 kfree(state->crtcs); 61 kfree(state->crtcs);
49 kfree(state->crtc_states);
50 kfree(state->planes); 62 kfree(state->planes);
51 kfree(state->plane_states);
52} 63}
53EXPORT_SYMBOL(drm_atomic_state_default_release); 64EXPORT_SYMBOL(drm_atomic_state_default_release);
54 65
@@ -72,18 +83,10 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
72 sizeof(*state->crtcs), GFP_KERNEL); 83 sizeof(*state->crtcs), GFP_KERNEL);
73 if (!state->crtcs) 84 if (!state->crtcs)
74 goto fail; 85 goto fail;
75 state->crtc_states = kcalloc(dev->mode_config.num_crtc,
76 sizeof(*state->crtc_states), GFP_KERNEL);
77 if (!state->crtc_states)
78 goto fail;
79 state->planes = kcalloc(dev->mode_config.num_total_plane, 86 state->planes = kcalloc(dev->mode_config.num_total_plane,
80 sizeof(*state->planes), GFP_KERNEL); 87 sizeof(*state->planes), GFP_KERNEL);
81 if (!state->planes) 88 if (!state->planes)
82 goto fail; 89 goto fail;
83 state->plane_states = kcalloc(dev->mode_config.num_total_plane,
84 sizeof(*state->plane_states), GFP_KERNEL);
85 if (!state->plane_states)
86 goto fail;
87 90
88 state->dev = dev; 91 state->dev = dev;
89 92
@@ -139,40 +142,48 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
139 DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state); 142 DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state);
140 143
141 for (i = 0; i < state->num_connector; i++) { 144 for (i = 0; i < state->num_connector; i++) {
142 struct drm_connector *connector = state->connectors[i]; 145 struct drm_connector *connector = state->connectors[i].ptr;
143 146
144 if (!connector) 147 if (!connector)
145 continue; 148 continue;
146 149
147 connector->funcs->atomic_destroy_state(connector, 150 connector->funcs->atomic_destroy_state(connector,
148 state->connector_states[i]); 151 state->connectors[i].state);
149 state->connectors[i] = NULL; 152 state->connectors[i].ptr = NULL;
150 state->connector_states[i] = NULL; 153 state->connectors[i].state = NULL;
151 drm_connector_unreference(connector); 154 drm_connector_unreference(connector);
152 } 155 }
153 156
154 for (i = 0; i < config->num_crtc; i++) { 157 for (i = 0; i < config->num_crtc; i++) {
155 struct drm_crtc *crtc = state->crtcs[i]; 158 struct drm_crtc *crtc = state->crtcs[i].ptr;
156 159
157 if (!crtc) 160 if (!crtc)
158 continue; 161 continue;
159 162
160 crtc->funcs->atomic_destroy_state(crtc, 163 crtc->funcs->atomic_destroy_state(crtc,
161 state->crtc_states[i]); 164 state->crtcs[i].state);
162 state->crtcs[i] = NULL; 165
163 state->crtc_states[i] = NULL; 166 if (state->crtcs[i].commit) {
167 kfree(state->crtcs[i].commit->event);
168 state->crtcs[i].commit->event = NULL;
169 drm_crtc_commit_put(state->crtcs[i].commit);
170 }
171
172 state->crtcs[i].commit = NULL;
173 state->crtcs[i].ptr = NULL;
174 state->crtcs[i].state = NULL;
164 } 175 }
165 176
166 for (i = 0; i < config->num_total_plane; i++) { 177 for (i = 0; i < config->num_total_plane; i++) {
167 struct drm_plane *plane = state->planes[i]; 178 struct drm_plane *plane = state->planes[i].ptr;
168 179
169 if (!plane) 180 if (!plane)
170 continue; 181 continue;
171 182
172 plane->funcs->atomic_destroy_state(plane, 183 plane->funcs->atomic_destroy_state(plane,
173 state->plane_states[i]); 184 state->planes[i].state);
174 state->planes[i] = NULL; 185 state->planes[i].ptr = NULL;
175 state->plane_states[i] = NULL; 186 state->planes[i].state = NULL;
176 } 187 }
177} 188}
178EXPORT_SYMBOL(drm_atomic_state_default_clear); 189EXPORT_SYMBOL(drm_atomic_state_default_clear);
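
The connector, CRTC and plane arrays in struct drm_atomic_state now pair the object pointer with its state (state->crtcs[i].ptr / state->crtcs[i].state) instead of two parallel arrays, leaving room for the per-CRTC commit tracking added below. Drivers are unaffected as long as they go through the for_each_*_in_state() iterators; a minimal sketch:

#include <drm/drm_atomic.h>

static void dump_crtcs_in_state(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i;

	/* crtc is state->crtcs[i].ptr, crtc_state is state->crtcs[i].state */
	for_each_crtc_in_state(state, crtc, crtc_state, i)
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] is part of update %p\n",
				 crtc->base.id, crtc->name, state);
}
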
@@ -270,8 +281,8 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
270 if (!crtc_state) 281 if (!crtc_state)
271 return ERR_PTR(-ENOMEM); 282 return ERR_PTR(-ENOMEM);
272 283
273 state->crtc_states[index] = crtc_state; 284 state->crtcs[index].state = crtc_state;
274 state->crtcs[index] = crtc; 285 state->crtcs[index].ptr = crtc;
275 crtc_state->state = state; 286 crtc_state->state = state;
276 287
277 DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n", 288 DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n",
@@ -351,6 +362,8 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
351 drm_property_unreference_blob(state->mode_blob); 362 drm_property_unreference_blob(state->mode_blob);
352 state->mode_blob = NULL; 363 state->mode_blob = NULL;
353 364
365 memset(&state->mode, 0, sizeof(state->mode));
366
354 if (blob) { 367 if (blob) {
355 if (blob->length != sizeof(struct drm_mode_modeinfo) || 368 if (blob->length != sizeof(struct drm_mode_modeinfo) ||
356 drm_mode_convert_umode(&state->mode, 369 drm_mode_convert_umode(&state->mode,
@@ -363,7 +376,6 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
363 DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n", 376 DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
364 state->mode.name, state); 377 state->mode.name, state);
365 } else { 378 } else {
366 memset(&state->mode, 0, sizeof(state->mode));
367 state->enable = false; 379 state->enable = false;
368 DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n", 380 DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
369 state); 381 state);
@@ -631,8 +643,8 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
631 if (!plane_state) 643 if (!plane_state)
632 return ERR_PTR(-ENOMEM); 644 return ERR_PTR(-ENOMEM);
633 645
634 state->plane_states[index] = plane_state; 646 state->planes[index].state = plane_state;
635 state->planes[index] = plane; 647 state->planes[index].ptr = plane;
636 plane_state->state = state; 648 plane_state->state = state;
637 649
638 DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n", 650 DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n",
@@ -896,8 +908,7 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
896 index = drm_connector_index(connector); 908 index = drm_connector_index(connector);
897 909
898 if (index >= state->num_connector) { 910 if (index >= state->num_connector) {
899 struct drm_connector **c; 911 struct __drm_connnectors_state *c;
900 struct drm_connector_state **cs;
901 int alloc = max(index + 1, config->num_connector); 912 int alloc = max(index + 1, config->num_connector);
902 913
903 c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL); 914 c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL);
@@ -908,26 +919,19 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
908 memset(&state->connectors[state->num_connector], 0, 919 memset(&state->connectors[state->num_connector], 0,
909 sizeof(*state->connectors) * (alloc - state->num_connector)); 920 sizeof(*state->connectors) * (alloc - state->num_connector));
910 921
911 cs = krealloc(state->connector_states, alloc * sizeof(*state->connector_states), GFP_KERNEL);
912 if (!cs)
913 return ERR_PTR(-ENOMEM);
914
915 state->connector_states = cs;
916 memset(&state->connector_states[state->num_connector], 0,
917 sizeof(*state->connector_states) * (alloc - state->num_connector));
918 state->num_connector = alloc; 922 state->num_connector = alloc;
919 } 923 }
920 924
921 if (state->connector_states[index]) 925 if (state->connectors[index].state)
922 return state->connector_states[index]; 926 return state->connectors[index].state;
923 927
924 connector_state = connector->funcs->atomic_duplicate_state(connector); 928 connector_state = connector->funcs->atomic_duplicate_state(connector);
925 if (!connector_state) 929 if (!connector_state)
926 return ERR_PTR(-ENOMEM); 930 return ERR_PTR(-ENOMEM);
927 931
928 drm_connector_reference(connector); 932 drm_connector_reference(connector);
929 state->connector_states[index] = connector_state; 933 state->connectors[index].state = connector_state;
930 state->connectors[index] = connector; 934 state->connectors[index].ptr = connector;
931 connector_state->state = state; 935 connector_state->state = state;
932 936
933 DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d] %p state to %p\n", 937 DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d] %p state to %p\n",
@@ -1295,14 +1299,39 @@ EXPORT_SYMBOL(drm_atomic_add_affected_planes);
1295 */ 1299 */
1296void drm_atomic_legacy_backoff(struct drm_atomic_state *state) 1300void drm_atomic_legacy_backoff(struct drm_atomic_state *state)
1297{ 1301{
1302 struct drm_device *dev = state->dev;
1303 unsigned crtc_mask = 0;
1304 struct drm_crtc *crtc;
1298 int ret; 1305 int ret;
1306 bool global = false;
1307
1308 drm_for_each_crtc(crtc, dev) {
1309 if (crtc->acquire_ctx != state->acquire_ctx)
1310 continue;
1311
1312 crtc_mask |= drm_crtc_mask(crtc);
1313 crtc->acquire_ctx = NULL;
1314 }
1315
1316 if (WARN_ON(dev->mode_config.acquire_ctx == state->acquire_ctx)) {
1317 global = true;
1318
1319 dev->mode_config.acquire_ctx = NULL;
1320 }
1299 1321
1300retry: 1322retry:
1301 drm_modeset_backoff(state->acquire_ctx); 1323 drm_modeset_backoff(state->acquire_ctx);
1302 1324
1303 ret = drm_modeset_lock_all_ctx(state->dev, state->acquire_ctx); 1325 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
1304 if (ret) 1326 if (ret)
1305 goto retry; 1327 goto retry;
1328
1329 drm_for_each_crtc(crtc, dev)
1330 if (drm_crtc_mask(crtc) & crtc_mask)
1331 crtc->acquire_ctx = state->acquire_ctx;
1332
1333 if (global)
1334 dev->mode_config.acquire_ctx = state->acquire_ctx;
1306} 1335}
1307EXPORT_SYMBOL(drm_atomic_legacy_backoff); 1336EXPORT_SYMBOL(drm_atomic_legacy_backoff);
1308 1337
@@ -1431,7 +1460,8 @@ EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
1431 */ 1460 */
1432 1461
1433static struct drm_pending_vblank_event *create_vblank_event( 1462static struct drm_pending_vblank_event *create_vblank_event(
1434 struct drm_device *dev, struct drm_file *file_priv, uint64_t user_data) 1463 struct drm_device *dev, struct drm_file *file_priv,
1464 struct fence *fence, uint64_t user_data)
1435{ 1465{
1436 struct drm_pending_vblank_event *e = NULL; 1466 struct drm_pending_vblank_event *e = NULL;
1437 int ret; 1467 int ret;
@@ -1444,12 +1474,17 @@ static struct drm_pending_vblank_event *create_vblank_event(
1444 e->event.base.length = sizeof(e->event); 1474 e->event.base.length = sizeof(e->event);
1445 e->event.user_data = user_data; 1475 e->event.user_data = user_data;
1446 1476
1447 ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base); 1477 if (file_priv) {
1448 if (ret) { 1478 ret = drm_event_reserve_init(dev, file_priv, &e->base,
1449 kfree(e); 1479 &e->event.base);
1450 return NULL; 1480 if (ret) {
1481 kfree(e);
1482 return NULL;
1483 }
1451 } 1484 }
1452 1485
1486 e->base.fence = fence;
1487
1453 return e; 1488 return e;
1454} 1489}
1455 1490
@@ -1689,7 +1724,8 @@ retry:
1689 for_each_crtc_in_state(state, crtc, crtc_state, i) { 1724 for_each_crtc_in_state(state, crtc, crtc_state, i) {
1690 struct drm_pending_vblank_event *e; 1725 struct drm_pending_vblank_event *e;
1691 1726
1692 e = create_vblank_event(dev, file_priv, arg->user_data); 1727 e = create_vblank_event(dev, file_priv, NULL,
1728 arg->user_data);
1693 if (!e) { 1729 if (!e) {
1694 ret = -ENOMEM; 1730 ret = -ENOMEM;
1695 goto out; 1731 goto out;
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index ddfa0d120e39..de7fddce3cef 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -110,8 +110,10 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
110 110
111 if (funcs->atomic_best_encoder) 111 if (funcs->atomic_best_encoder)
112 new_encoder = funcs->atomic_best_encoder(connector, conn_state); 112 new_encoder = funcs->atomic_best_encoder(connector, conn_state);
113 else 113 else if (funcs->best_encoder)
114 new_encoder = funcs->best_encoder(connector); 114 new_encoder = funcs->best_encoder(connector);
115 else
116 new_encoder = drm_atomic_helper_best_encoder(connector);
115 117
116 if (new_encoder) { 118 if (new_encoder) {
117 if (encoder_mask & (1 << drm_encoder_index(new_encoder))) { 119 if (encoder_mask & (1 << drm_encoder_index(new_encoder))) {
@@ -298,8 +300,10 @@ update_connector_routing(struct drm_atomic_state *state,
298 if (funcs->atomic_best_encoder) 300 if (funcs->atomic_best_encoder)
299 new_encoder = funcs->atomic_best_encoder(connector, 301 new_encoder = funcs->atomic_best_encoder(connector,
300 connector_state); 302 connector_state);
301 else 303 else if (funcs->best_encoder)
302 new_encoder = funcs->best_encoder(connector); 304 new_encoder = funcs->best_encoder(connector);
305 else
306 new_encoder = drm_atomic_helper_best_encoder(connector);
303 307
304 if (!new_encoder) { 308 if (!new_encoder) {
305 DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n", 309 DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
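
With this fallback in place, atomic drivers with the common one-encoder-per-connector mapping can drop their .best_encoder boilerplate entirely, which is what the ptn3460 and ps8622 hunks above rely on. A sketch of the resulting helper table, assuming a hypothetical foo_get_modes():

static int foo_get_modes(struct drm_connector *connector);

static const struct drm_connector_helper_funcs foo_connector_helpers = {
	.get_modes = foo_get_modes,
	/*
	 * No .best_encoder or .atomic_best_encoder needed: the core now
	 * falls back to drm_atomic_helper_best_encoder(), which returns
	 * the single encoder attached to the connector.
	 */
};
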
@@ -414,6 +418,9 @@ mode_fixup(struct drm_atomic_state *state)
414 for_each_crtc_in_state(state, crtc, crtc_state, i) { 418 for_each_crtc_in_state(state, crtc, crtc_state, i) {
415 const struct drm_crtc_helper_funcs *funcs; 419 const struct drm_crtc_helper_funcs *funcs;
416 420
421 if (!crtc_state->enable)
422 continue;
423
417 if (!crtc_state->mode_changed && 424 if (!crtc_state->mode_changed &&
418 !crtc_state->connectors_changed) 425 !crtc_state->connectors_changed)
419 continue; 426 continue;
@@ -458,7 +465,7 @@ mode_fixup(struct drm_atomic_state *state)
458 * times for the same update, e.g. when the ->atomic_check functions depend upon 465 * times for the same update, e.g. when the ->atomic_check functions depend upon
459 * the adjusted dotclock for fifo space allocation and watermark computation. 466 * the adjusted dotclock for fifo space allocation and watermark computation.
460 * 467 *
461 * RETURNS 468 * RETURNS:
462 * Zero for success or -errno 469 * Zero for success or -errno
463 */ 470 */
464int 471int
@@ -572,7 +579,7 @@ EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
572 * It also sets crtc_state->planes_changed to indicate that a crtc has 579 * It also sets crtc_state->planes_changed to indicate that a crtc has
573 * updated planes. 580 * updated planes.
574 * 581 *
575 * RETURNS 582 * RETURNS:
576 * Zero for success or -errno 583 * Zero for success or -errno
577 */ 584 */
578int 585int
@@ -611,7 +618,7 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
611 if (!funcs || !funcs->atomic_check) 618 if (!funcs || !funcs->atomic_check)
612 continue; 619 continue;
613 620
614 ret = funcs->atomic_check(crtc, state->crtc_states[i]); 621 ret = funcs->atomic_check(crtc, crtc_state);
615 if (ret) { 622 if (ret) {
616 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n", 623 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
617 crtc->base.id, crtc->name); 624 crtc->base.id, crtc->name);
@@ -640,7 +647,7 @@ EXPORT_SYMBOL(drm_atomic_helper_check_planes);
640 * ->atomic_check functions depend upon an updated adjusted_mode.clock to 647 * ->atomic_check functions depend upon an updated adjusted_mode.clock to
641 * e.g. properly compute watermarks. 648 * e.g. properly compute watermarks.
642 * 649 *
643 * RETURNS 650 * RETURNS:
644 * Zero for success or -errno 651 * Zero for success or -errno
645 */ 652 */
646int drm_atomic_helper_check(struct drm_device *dev, 653int drm_atomic_helper_check(struct drm_device *dev,
@@ -1113,22 +1120,17 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
1113EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks); 1120EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
1114 1121
1115/** 1122/**
1116 * drm_atomic_helper_commit - commit validated state object 1123 * drm_atomic_helper_commit_tail - commit atomic update to hardware
1117 * @dev: DRM device 1124 * @state: new modeset state to be committed
1118 * @state: the driver state object
1119 * @nonblocking: whether nonblocking behavior is requested.
1120 * 1125 *
1121 * This function commits a with drm_atomic_helper_check() pre-validated state 1126 * This is the default implementation for the ->atomic_commit_tail() hook of the
1122 * object. This can still fail when e.g. the framebuffer reservation fails. For 1127 * &drm_mode_config_helper_funcs vtable.
1123 * now this doesn't implement nonblocking commits.
1124 * 1128 *
1125 * Note that right now this function does not support nonblocking commits, hence 1129 * Note that the default ordering of how the various stages are called is to
1126 * driver writers must implement their own version for now. Also note that the 1130 * match the legacy modeset helper library closest. One peculiarity of that is
1127 * default ordering of how the various stages are called is to match the legacy 1131 * that it doesn't mesh well with runtime PM at all.
1128 * modeset helper library closest. One peculiarity of that is that it doesn't
1129 * mesh well with runtime PM at all.
1130 * 1132 *
1131 * For drivers supporting runtime PM the recommended sequence is 1133 * For drivers supporting runtime PM the recommended sequence is instead ::
1132 * 1134 *
1133 * drm_atomic_helper_commit_modeset_disables(dev, state); 1135 * drm_atomic_helper_commit_modeset_disables(dev, state);
1134 * 1136 *
@@ -1136,9 +1138,75 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
1136 * 1138 *
1137 * drm_atomic_helper_commit_planes(dev, state, true); 1139 * drm_atomic_helper_commit_planes(dev, state, true);
1138 * 1140 *
1139 * See the kerneldoc entries for these three functions for more details. 1141 * for committing the atomic update to hardware. See the kerneldoc entries for
1142 * these three functions for more details.
1143 */
1144void drm_atomic_helper_commit_tail(struct drm_atomic_state *state)
1145{
1146 struct drm_device *dev = state->dev;
1147
1148 drm_atomic_helper_commit_modeset_disables(dev, state);
1149
1150 drm_atomic_helper_commit_planes(dev, state, false);
1151
1152 drm_atomic_helper_commit_modeset_enables(dev, state);
1153
1154 drm_atomic_helper_commit_hw_done(state);
1155
1156 drm_atomic_helper_wait_for_vblanks(dev, state);
1157
1158 drm_atomic_helper_cleanup_planes(dev, state);
1159}
1160EXPORT_SYMBOL(drm_atomic_helper_commit_tail);
1161
1162static void commit_tail(struct drm_atomic_state *state)
1163{
1164 struct drm_device *dev = state->dev;
1165 struct drm_mode_config_helper_funcs *funcs;
1166
1167 funcs = dev->mode_config.helper_private;
1168
1169 drm_atomic_helper_wait_for_fences(dev, state);
1170
1171 drm_atomic_helper_wait_for_dependencies(state);
1172
1173 if (funcs && funcs->atomic_commit_tail)
1174 funcs->atomic_commit_tail(state);
1175 else
1176 drm_atomic_helper_commit_tail(state);
1177
1178 drm_atomic_helper_commit_cleanup_done(state);
1179
1180 drm_atomic_state_free(state);
1181}
1182
1183static void commit_work(struct work_struct *work)
1184{
1185 struct drm_atomic_state *state = container_of(work,
1186 struct drm_atomic_state,
1187 commit_work);
1188 commit_tail(state);
1189}
1190
1191/**
1192 * drm_atomic_helper_commit - commit validated state object
1193 * @dev: DRM device
1194 * @state: the driver state object
1195 * @nonblock: whether nonblocking behavior is requested.
1196 *
1197 * This function commits a state object that was pre-validated with
1198 * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer reservation fails. This
1199 * function implements nonblocking commits, using
1200 * drm_atomic_helper_setup_commit() and related functions.
1201 *
1202 * Note that for nonblocking commits the actual hardware programming is done
1203 * from a worker on the system_unbound_wq, see commit_work() below.
1204 *
1205 * Committing the actual hardware state is done through the
1206 * ->atomic_commit_tail() callback of the &drm_mode_config_helper_funcs vtable,
1207 * or its default implementation drm_atomic_helper_commit_tail().
1140 * 1208 *
1141 * RETURNS 1209 * RETURNS:
1142 * Zero for success or -errno. 1210 * Zero for success or -errno.
1143 */ 1211 */
1144int drm_atomic_helper_commit(struct drm_device *dev, 1212int drm_atomic_helper_commit(struct drm_device *dev,
@@ -1147,8 +1215,11 @@ int drm_atomic_helper_commit(struct drm_device *dev,
1147{ 1215{
1148 int ret; 1216 int ret;
1149 1217
1150 if (nonblock) 1218 ret = drm_atomic_helper_setup_commit(state, nonblock);
1151 return -EBUSY; 1219 if (ret)
1220 return ret;
1221
1222 INIT_WORK(&state->commit_work, commit_work);
1152 1223
1153 ret = drm_atomic_helper_prepare_planes(dev, state); 1224 ret = drm_atomic_helper_prepare_planes(dev, state);
1154 if (ret) 1225 if (ret)
@@ -1160,7 +1231,7 @@ int drm_atomic_helper_commit(struct drm_device *dev,
1160 * the software side now. 1231 * the software side now.
1161 */ 1232 */
1162 1233
1163 drm_atomic_helper_swap_state(dev, state); 1234 drm_atomic_helper_swap_state(state, true);
1164 1235
1165 /* 1236 /*
1166 * Everything below can be run asynchronously without the need to grab 1237 * Everything below can be run asynchronously without the need to grab
@@ -1176,21 +1247,16 @@ int drm_atomic_helper_commit(struct drm_device *dev,
1176 * update. Which is important since compositors need to figure out the 1247 * update. Which is important since compositors need to figure out the
1177 * composition of the next frame right after having submitted the 1248 * composition of the next frame right after having submitted the
1178 * current layout. 1249 * current layout.
1250 *
1251 * NOTE: Commit work has multiple phases, first hardware commit, then
1252 * cleanup. We want them to overlap, hence we need system_unbound_wq to
1253 * make sure work items don't artificially stall on each other.
1179 */ 1254 */
1180 1255
1181 drm_atomic_helper_wait_for_fences(dev, state); 1256 if (nonblock)
1182 1257 queue_work(system_unbound_wq, &state->commit_work);
1183 drm_atomic_helper_commit_modeset_disables(dev, state); 1258 else
1184 1259 commit_tail(state);
1185 drm_atomic_helper_commit_planes(dev, state, false);
1186
1187 drm_atomic_helper_commit_modeset_enables(dev, state);
1188
1189 drm_atomic_helper_wait_for_vblanks(dev, state);
1190
1191 drm_atomic_helper_cleanup_planes(dev, state);
1192
1193 drm_atomic_state_free(state);
1194 1260
1195 return 0; 1261 return 0;
1196} 1262}
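
Drivers that only need a different ordering (e.g. for runtime PM) no longer have to copy the whole commit function; they override just the tail. A sketch wiring up the runtime-PM friendly sequence recommended in the kerneldoc above, with all foo_* names assumed:

static void foo_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;

	drm_atomic_helper_commit_modeset_disables(dev, state);
	drm_atomic_helper_commit_modeset_enables(dev, state);
	/* active_only == true: skip planes on CRTCs that stay off */
	drm_atomic_helper_commit_planes(dev, state, true);

	drm_atomic_helper_commit_hw_done(state);
	drm_atomic_helper_wait_for_vblanks(dev, state);
	drm_atomic_helper_cleanup_planes(dev, state);
}

static struct drm_mode_config_helper_funcs foo_mode_config_helpers = {
	.atomic_commit_tail = foo_atomic_commit_tail,
};

/* at init time: dev->mode_config.helper_private = &foo_mode_config_helpers; */
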
@@ -1199,12 +1265,7 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
1199/** 1265/**
1200 * DOC: implementing nonblocking commit 1266 * DOC: implementing nonblocking commit
1201 * 1267 *
1202 * For now the atomic helpers don't support nonblocking commit directly. If 1268 * Nonblocking atomic commits have to be implemented in the following sequence:
1203 * there is real need it could be added though, using the dma-buf fence
1204 * infrastructure for generic synchronization with outstanding rendering.
1205 *
1206 * For now drivers have to implement nonblocking commit themselves, with the
1207 * following sequence being the recommended one:
1208 * 1269 *
1209 * 1. Run drm_atomic_helper_prepare_planes() first. This is the only function 1270 * 1. Run drm_atomic_helper_prepare_planes() first. This is the only function
1210 * which commit needs to call which can fail, so we want to run it first and 1271 * which commit needs to call which can fail, so we want to run it first and
@@ -1216,10 +1277,14 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
1216 * cancelled updates. Note that it is important to ensure that the framebuffer 1277 * cancelled updates. Note that it is important to ensure that the framebuffer
1217 * cleanup is still done when cancelling. 1278 * cleanup is still done when cancelling.
1218 * 1279 *
1219 * For sufficient parallelism it is recommended to have a work item per crtc 1280 * Asynchronous workers need to have sufficient parallelism to be able to run
1220 * (for updates which don't touch global state) and a global one. Then we only 1281 * different atomic commits on different CRTCs in parallel. The simplest way to
1221 * need to synchronize with the crtc work items for changed crtcs and the global 1282 * achieve this is by running them on the &system_unbound_wq work queue. Note
1222 * work item, which allows nice concurrent updates on disjoint sets of crtcs. 1283 * that drivers are not required to split up atomic commits and run an
1284 * individual commit in parallel - userspace is supposed to do that if it cares.
1285 * But it might be beneficial to do that for modesets, since those necessarily
1286 * must be done as one global operation, and enabling or disabling a CRTC can
1287 * take a long time. But even that is not required.
1223 * 1288 *
1224 * 3. The software state is updated synchronously with 1289 * 3. The software state is updated synchronously with
1225 * drm_atomic_helper_swap_state(). Doing this under the protection of all modeset 1290 * drm_atomic_helper_swap_state(). Doing this under the protection of all modeset
@@ -1232,8 +1297,310 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
1232 * commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and 1297 * commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and
1233 * then cleaning up the framebuffers after the old framebuffer is no longer 1298 * then cleaning up the framebuffers after the old framebuffer is no longer
1234 * being displayed. 1299 * being displayed.
1300 *
1301 * The above scheme is implemented in the atomic helper libraries in
1302 * drm_atomic_helper_commit() using a bunch of helper functions. See
1303 * drm_atomic_helper_setup_commit() for a starting point.
1235 */ 1304 */
1236 1305
1306static int stall_checks(struct drm_crtc *crtc, bool nonblock)
1307{
1308 struct drm_crtc_commit *commit, *stall_commit = NULL;
1309 bool completed = true;
1310 int i;
1311 long ret = 0;
1312
1313 spin_lock(&crtc->commit_lock);
1314 i = 0;
1315 list_for_each_entry(commit, &crtc->commit_list, commit_entry) {
1316 if (i == 0) {
1317 completed = try_wait_for_completion(&commit->flip_done);
1318 /* Userspace is not allowed to get ahead of the previous
1319 * commit with nonblocking ones. */
1320 if (!completed && nonblock) {
1321 spin_unlock(&crtc->commit_lock);
1322 return -EBUSY;
1323 }
1324 } else if (i == 1) {
1325 stall_commit = commit;
1326 drm_crtc_commit_get(stall_commit);
1327 break;
1328 }
1329
1330 i++;
1331 }
1332 spin_unlock(&crtc->commit_lock);
1333
1334 if (!stall_commit)
1335 return 0;
1336
1337 /* We don't want to let commits get ahead of cleanup work too much,
1338 * stalling on 2nd previous commit means triple-buffer won't ever stall.
1339 */
1340 ret = wait_for_completion_interruptible_timeout(&stall_commit->cleanup_done,
1341 10*HZ);
1342 if (ret == 0)
1343 DRM_ERROR("[CRTC:%d:%s] cleanup_done timed out\n",
1344 crtc->base.id, crtc->name);
1345
1346 drm_crtc_commit_put(stall_commit);
1347
1348 return ret < 0 ? ret : 0;
1349}
1350
1351/**
1352 * drm_atomic_helper_setup_commit - setup possibly nonblocking commit
1353 * @state: new modeset state to be committed
1354 * @nonblock: whether nonblocking behavior is requested.
1355 *
1356 * This function prepares @state to be used by the atomic helper's support for
1357 * nonblocking commits. Drivers using the nonblocking commit infrastructure
1358 * should always call this function from their ->atomic_commit hook.
1359 *
1360 * To be able to use this support drivers need to use a few more helper
1361 * functions. drm_atomic_helper_wait_for_dependencies() must be called before
1362 * actually committing the hardware state, and for nonblocking commits this call
1363 * must be placed in the async worker. See also drm_atomic_helper_swap_state()
1364 * and its stall parameter, for when a driver's commit hooks look at the
1365 * ->state pointers of struct &drm_crtc, &drm_plane or &drm_connector directly.
1366 *
1367 * Completion of the hardware commit step must be signalled using
1368 * drm_atomic_helper_commit_hw_done(). After this step the driver is not allowed
1369 * to read or change any permanent software or hardware modeset state. The only
1370 * exception is state protected by other means than &drm_modeset_lock locks.
1371 * Only the free standing @state with pointers to the old state structures can
1372 * be inspected, e.g. to clean up old buffers using
1373 * drm_atomic_helper_cleanup_planes().
1374 *
1375 * At the very end, before cleaning up @state drivers must call
1376 * drm_atomic_helper_commit_cleanup_done().
1377 *
1378 * This is all implemented in drm_atomic_helper_commit(), giving drivers a
1379 * complete and easy-to-use default implementation of the atomic_commit() hook.
1380 *
1381 * The tracking of asynchronously executed and still pending commits is done
1382 * using the core structure &drm_crtc_commit.
1383 *
1384 * By default there's no need to clean up resources allocated by this function
1385 * explicitly: drm_atomic_state_default_clear() will take care of that
1386 * automatically.
1387 *
1388 * Returns:
1389 *
1390 * 0 on success. -EBUSY when userspace schedules nonblocking commits too fast,
1391 * -ENOMEM on allocation failures and -EINTR when a signal is pending.
1392 */
1393int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
1394 bool nonblock)
1395{
1396 struct drm_crtc *crtc;
1397 struct drm_crtc_state *crtc_state;
1398 struct drm_crtc_commit *commit;
1399 int i, ret;
1400
1401 for_each_crtc_in_state(state, crtc, crtc_state, i) {
1402 commit = kzalloc(sizeof(*commit), GFP_KERNEL);
1403 if (!commit)
1404 return -ENOMEM;
1405
1406 init_completion(&commit->flip_done);
1407 init_completion(&commit->hw_done);
1408 init_completion(&commit->cleanup_done);
1409 INIT_LIST_HEAD(&commit->commit_entry);
1410 kref_init(&commit->ref);
1411 commit->crtc = crtc;
1412
1413 state->crtcs[i].commit = commit;
1414
1415 ret = stall_checks(crtc, nonblock);
1416 if (ret)
1417 return ret;
1418
1419 /* Drivers only send out events when at least either current or
1420 * new CRTC state is active. Complete right away if everything
1421 * stays off. */
1422 if (!crtc->state->active && !crtc_state->active) {
1423 complete_all(&commit->flip_done);
1424 continue;
1425 }
1426
1427 /* Legacy cursor updates are fully unsynced. */
1428 if (state->legacy_cursor_update) {
1429 complete_all(&commit->flip_done);
1430 continue;
1431 }
1432
1433 if (!crtc_state->event) {
1434 commit->event = kzalloc(sizeof(*commit->event),
1435 GFP_KERNEL);
1436 if (!commit->event)
1437 return -ENOMEM;
1438
1439 crtc_state->event = commit->event;
1440 }
1441
1442 crtc_state->event->base.completion = &commit->flip_done;
1443 }
1444
1445 return 0;
1446}
1447EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
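
Putting the pieces together, a driver-private commit worker built on these helpers would roughly pair the completions up like this. A sketch only; the ordering of the hardware programming in the middle is the driver's own business and foo_commit_tail is an assumed name:

static void foo_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;

	/* stall until earlier commits on the same CRTCs have finished */
	drm_atomic_helper_wait_for_dependencies(old_state);

	drm_atomic_helper_commit_modeset_disables(dev, old_state);
	drm_atomic_helper_commit_planes(dev, old_state, false);
	drm_atomic_helper_commit_modeset_enables(dev, old_state);

	/* past this point only the free-standing old_state may be touched */
	drm_atomic_helper_commit_hw_done(old_state);

	drm_atomic_helper_wait_for_vblanks(dev, old_state);
	drm_atomic_helper_cleanup_planes(dev, old_state);

	/* must come right before freeing the state */
	drm_atomic_helper_commit_cleanup_done(old_state);
	drm_atomic_state_free(old_state);
}
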
1448
1449
1450static struct drm_crtc_commit *preceding_commit(struct drm_crtc *crtc)
1451{
1452 struct drm_crtc_commit *commit;
1453 int i = 0;
1454
1455 list_for_each_entry(commit, &crtc->commit_list, commit_entry) {
1456 /* skip the first entry, that's the current commit */
1457 if (i == 1)
1458 return commit;
1459 i++;
1460 }
1461
1462 return NULL;
1463}
1464
1465/**
1466 * drm_atomic_helper_wait_for_dependencies - wait for required preceding commits
1467 * @state: new modeset state to be committed
1468 *
1469 * This function waits for all preceding commits that touch the same CRTC as
1470 * @state to both be committed to the hardware (as signalled by
1471 * drm_atomic_helper_commit_hw_done) and executed by the hardware (as signalled
1472 * by calling drm_crtc_vblank_send_event on the event member of
1473 * &drm_crtc_state).
1474 *
1475 * This is part of the atomic helper support for nonblocking commits, see
1476 * drm_atomic_helper_setup_commit() for an overview.
1477 */
1478void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state)
1479{
1480 struct drm_crtc *crtc;
1481 struct drm_crtc_state *crtc_state;
1482 struct drm_crtc_commit *commit;
1483 int i;
1484 long ret;
1485
1486 for_each_crtc_in_state(state, crtc, crtc_state, i) {
1487 spin_lock(&crtc->commit_lock);
1488		commit = preceding_commit(crtc);
1489 if (commit)
1490 drm_crtc_commit_get(commit);
1491 spin_unlock(&crtc->commit_lock);
1492
1493 if (!commit)
1494 continue;
1495
1496 ret = wait_for_completion_timeout(&commit->hw_done,
1497 10*HZ);
1498 if (ret == 0)
1499 DRM_ERROR("[CRTC:%d:%s] hw_done timed out\n",
1500 crtc->base.id, crtc->name);
1501
1502 /* Currently no support for overwriting flips, hence
1503 * stall for previous one to execute completely. */
1504 ret = wait_for_completion_timeout(&commit->flip_done,
1505 10*HZ);
1506 if (ret == 0)
1507 DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
1508 crtc->base.id, crtc->name);
1509
1510 drm_crtc_commit_put(commit);
1511 }
1512}
1513EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
1514
1515/**
1516 * drm_atomic_helper_commit_hw_done - signal completion of the hardware commit step
1517 * @state: new modeset state to be committed
1518 *
1519 * This function is used to signal completion of the hardware commit step. After
1520 * this step the driver is not allowed to read or change any permanent software
1521 * or hardware modeset state. The only exception is state protected by other
1522 * means than &drm_modeset_lock locks.
1523 *
1524 * Drivers should try to postpone any expensive or delayed cleanup work after
1525 * this function is called.
1526 *
1527 * This is part of the atomic helper support for nonblocking commits, see
1528 * drm_atomic_helper_setup_commit() for an overview.
1529 */
1530void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state)
1531{
1532 struct drm_crtc *crtc;
1533 struct drm_crtc_state *crtc_state;
1534 struct drm_crtc_commit *commit;
1535 int i;
1536
1537 for_each_crtc_in_state(state, crtc, crtc_state, i) {
1538 commit = state->crtcs[i].commit;
1539 if (!commit)
1540 continue;
1541
1542 /* backend must have consumed any event by now */
1543 WARN_ON(crtc->state->event);
1544 spin_lock(&crtc->commit_lock);
1545 complete_all(&commit->hw_done);
1546 spin_unlock(&crtc->commit_lock);
1547 }
1548}
1549EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);
1550
1551/**
1552 * drm_atomic_helper_commit_cleanup_done - signal completion of commit
1553 * @state: new modeset state to be committed
1554 *
1555 * This signals completion of the atomic update @state, including any cleanup
1556 * work. If used, it must be called right before calling
1557 * drm_atomic_state_free().
1558 *
1559 * This is part of the atomic helper support for nonblocking commits, see
1560 * drm_atomic_helper_setup_commit() for an overview.
1561 */
1562void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state)
1563{
1564 struct drm_crtc *crtc;
1565 struct drm_crtc_state *crtc_state;
1566 struct drm_crtc_commit *commit;
1567 int i;
1568 long ret;
1569
1570 for_each_crtc_in_state(state, crtc, crtc_state, i) {
1571 commit = state->crtcs[i].commit;
1572 if (WARN_ON(!commit))
1573 continue;
1574
1575 spin_lock(&crtc->commit_lock);
1576 complete_all(&commit->cleanup_done);
1577 WARN_ON(!try_wait_for_completion(&commit->hw_done));
1578
1579 /* commit_list borrows our reference, need to remove before we
1580 * clean up our drm_atomic_state. But only after it actually
1581 * completed, otherwise subsequent commits won't stall properly. */
1582 if (try_wait_for_completion(&commit->flip_done))
1583 goto del_commit;
1584
1585 spin_unlock(&crtc->commit_lock);
1586
1587 /* We must wait for the vblank event to signal our completion
1588 * before releasing our reference, since the vblank work does
1589 * not hold a reference of its own. */
1590 ret = wait_for_completion_timeout(&commit->flip_done,
1591 10*HZ);
1592 if (ret == 0)
1593 DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
1594 crtc->base.id, crtc->name);
1595
1596 spin_lock(&crtc->commit_lock);
1597del_commit:
1598 list_del(&commit->commit_entry);
1599 spin_unlock(&crtc->commit_lock);
1600 }
1601}
1602EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done);
1603
1237/** 1604/**
1238 * drm_atomic_helper_prepare_planes - prepare plane resources before commit 1605 * drm_atomic_helper_prepare_planes - prepare plane resources before commit
1239 * @dev: DRM device 1606 * @dev: DRM device
@@ -1249,16 +1616,12 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
1249int drm_atomic_helper_prepare_planes(struct drm_device *dev, 1616int drm_atomic_helper_prepare_planes(struct drm_device *dev,
1250 struct drm_atomic_state *state) 1617 struct drm_atomic_state *state)
1251{ 1618{
1252 int nplanes = dev->mode_config.num_total_plane; 1619 struct drm_plane *plane;
1253 int ret, i; 1620 struct drm_plane_state *plane_state;
1621 int ret, i, j;
1254 1622
1255 for (i = 0; i < nplanes; i++) { 1623 for_each_plane_in_state(state, plane, plane_state, i) {
1256 const struct drm_plane_helper_funcs *funcs; 1624 const struct drm_plane_helper_funcs *funcs;
1257 struct drm_plane *plane = state->planes[i];
1258 struct drm_plane_state *plane_state = state->plane_states[i];
1259
1260 if (!plane)
1261 continue;
1262 1625
1263 funcs = plane->helper_private; 1626 funcs = plane->helper_private;
1264 1627
@@ -1272,12 +1635,10 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
1272 return 0; 1635 return 0;
1273 1636
1274fail: 1637fail:
1275 for (i--; i >= 0; i--) { 1638 for_each_plane_in_state(state, plane, plane_state, j) {
1276 const struct drm_plane_helper_funcs *funcs; 1639 const struct drm_plane_helper_funcs *funcs;
1277 struct drm_plane *plane = state->planes[i];
1278 struct drm_plane_state *plane_state = state->plane_states[i];
1279 1640
1280 if (!plane) 1641 if (j >= i)
1281 continue; 1642 continue;
1282 1643
1283 funcs = plane->helper_private; 1644 funcs = plane->helper_private;
@@ -1537,8 +1898,8 @@ EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
1537 1898
1538/** 1899/**
1539 * drm_atomic_helper_swap_state - store atomic state into current sw state 1900 * drm_atomic_helper_swap_state - store atomic state into current sw state
1540 * @dev: DRM device
1541 * @state: atomic state 1901 * @state: atomic state
1902 * @stall: stall for proceeding commits
1542 * 1903 *
1543 * This function stores the atomic state into the current state pointers in all 1904 * This function stores the atomic state into the current state pointers in all
1544 * driver objects. It should be called after all failing steps have been done 1905 * driver objects. It should be called after all failing steps have been done
@@ -1559,42 +1920,70 @@ EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
1559 * 1920 *
1560 * 5. Call drm_atomic_helper_cleanup_planes() with @state, which since step 3 1921 * 5. Call drm_atomic_helper_cleanup_planes() with @state, which since step 3
1561 * contains the old state. Also do any other cleanup required with that state. 1922 * contains the old state. Also do any other cleanup required with that state.
1923 *
1924 * @stall must be set when nonblocking commits for this driver directly access
1925 * the ->state pointer of &drm_plane, &drm_crtc or &drm_connector. With the
1926 * current atomic helpers this is almost always the case, since the helpers
1927 * don't pass the right state structures to the callbacks.
1562 */ 1928 */
1563void drm_atomic_helper_swap_state(struct drm_device *dev, 1929void drm_atomic_helper_swap_state(struct drm_atomic_state *state,
1564 struct drm_atomic_state *state) 1930 bool stall)
1565{ 1931{
1566 int i; 1932 int i;
1933 long ret;
1934 struct drm_connector *connector;
1935 struct drm_connector_state *conn_state;
1936 struct drm_crtc *crtc;
1937 struct drm_crtc_state *crtc_state;
1938 struct drm_plane *plane;
1939 struct drm_plane_state *plane_state;
1940 struct drm_crtc_commit *commit;
1941
1942 if (stall) {
1943 for_each_crtc_in_state(state, crtc, crtc_state, i) {
1944 spin_lock(&crtc->commit_lock);
1945 commit = list_first_entry_or_null(&crtc->commit_list,
1946 struct drm_crtc_commit, commit_entry);
1947 if (commit)
1948 drm_crtc_commit_get(commit);
1949 spin_unlock(&crtc->commit_lock);
1950
1951 if (!commit)
1952 continue;
1567 1953
1568 for (i = 0; i < state->num_connector; i++) { 1954 ret = wait_for_completion_timeout(&commit->hw_done,
1569 struct drm_connector *connector = state->connectors[i]; 1955 10*HZ);
1570 1956 if (ret == 0)
1571 if (!connector) 1957 DRM_ERROR("[CRTC:%d:%s] hw_done timed out\n",
1572 continue; 1958 crtc->base.id, crtc->name);
1959 drm_crtc_commit_put(commit);
1960 }
1961 }
1573 1962
1963 for_each_connector_in_state(state, connector, conn_state, i) {
1574 connector->state->state = state; 1964 connector->state->state = state;
1575 swap(state->connector_states[i], connector->state); 1965 swap(state->connectors[i].state, connector->state);
1576 connector->state->state = NULL; 1966 connector->state->state = NULL;
1577 } 1967 }
1578 1968
1579 for (i = 0; i < dev->mode_config.num_crtc; i++) { 1969 for_each_crtc_in_state(state, crtc, crtc_state, i) {
1580 struct drm_crtc *crtc = state->crtcs[i];
1581
1582 if (!crtc)
1583 continue;
1584
1585 crtc->state->state = state; 1970 crtc->state->state = state;
1586 swap(state->crtc_states[i], crtc->state); 1971 swap(state->crtcs[i].state, crtc->state);
1587 crtc->state->state = NULL; 1972 crtc->state->state = NULL;
1588 }
1589 1973
1590 for (i = 0; i < dev->mode_config.num_total_plane; i++) { 1974 if (state->crtcs[i].commit) {
1591 struct drm_plane *plane = state->planes[i]; 1975 spin_lock(&crtc->commit_lock);
1976 list_add(&state->crtcs[i].commit->commit_entry,
1977 &crtc->commit_list);
1978 spin_unlock(&crtc->commit_lock);
1592 1979
1593 if (!plane) 1980 state->crtcs[i].commit->event = NULL;
1594 continue; 1981 }
1982 }
1595 1983
1984 for_each_plane_in_state(state, plane, plane_state, i) {
1596 plane->state->state = state; 1985 plane->state->state = state;
1597 swap(state->plane_states[i], plane->state); 1986 swap(state->planes[i].state, plane->state);
1598 plane->state->state = NULL; 1987 plane->state->state = NULL;
1599 } 1988 }
1600} 1989}
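
A minimal sketch of how a driver's commit entry point would use the new two-argument swap_state; the my_atomic_commit function and the surrounding driver are assumptions for illustration, not part of this patch:

	static int my_atomic_commit(struct drm_device *dev,
				    struct drm_atomic_state *state,
				    bool nonblock)
	{
		int ret;

		ret = drm_atomic_helper_prepare_planes(dev, state);
		if (ret)
			return ret;

		/*
		 * This (hypothetical) driver still dereferences the ->state
		 * pointers of planes/CRTCs directly in its commit work, so it
		 * must pass stall = true to wait for hw_done of preceding
		 * commits before the pointers are swapped.
		 */
		drm_atomic_helper_swap_state(state, true);

		/* queue nonblocking commit work or commit synchronously here */

		return 0;
	}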
@@ -2409,7 +2798,7 @@ EXPORT_SYMBOL(drm_atomic_helper_page_flip);
2409 * This is the main helper function provided by the atomic helper framework for 2798 * This is the main helper function provided by the atomic helper framework for
2410 * implementing the legacy DPMS connector interface. It computes the new desired 2799 * implementing the legacy DPMS connector interface. It computes the new desired
2411 * ->active state for the corresponding CRTC (if the connector is enabled) and 2800 * ->active state for the corresponding CRTC (if the connector is enabled) and
2412 * updates it. 2801 * updates it.
2413 * 2802 *
2414 * Returns: 2803 * Returns:
2415 * Returns 0 on success, negative errno numbers on failure. 2804 * Returns 0 on success, negative errno numbers on failure.
@@ -2930,16 +3319,15 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
2930 * @red: red correction table 3319 * @red: red correction table
2931 * @green: green correction table 3320 * @green: green correction table
2932 * @blue: blue correction table 3321 * @blue: blue correction table
2933 * @start:
2934 * @size: size of the tables 3322 * @size: size of the tables
2935 * 3323 *
2936 * Implements support for legacy gamma correction table for drivers 3324 * Implements support for legacy gamma correction table for drivers
2937 * that support color management through the DEGAMMA_LUT/GAMMA_LUT 3325 * that support color management through the DEGAMMA_LUT/GAMMA_LUT
2938 * properties. 3326 * properties.
2939 */ 3327 */
2940void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, 3328int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
2941 u16 *red, u16 *green, u16 *blue, 3329 u16 *red, u16 *green, u16 *blue,
2942 uint32_t start, uint32_t size) 3330 uint32_t size)
2943{ 3331{
2944 struct drm_device *dev = crtc->dev; 3332 struct drm_device *dev = crtc->dev;
2945 struct drm_mode_config *config = &dev->mode_config; 3333 struct drm_mode_config *config = &dev->mode_config;
@@ -2951,7 +3339,7 @@ void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
2951 3339
2952 state = drm_atomic_state_alloc(crtc->dev); 3340 state = drm_atomic_state_alloc(crtc->dev);
2953 if (!state) 3341 if (!state)
2954 return; 3342 return -ENOMEM;
2955 3343
2956 blob = drm_property_create_blob(dev, 3344 blob = drm_property_create_blob(dev,
2957 sizeof(struct drm_color_lut) * size, 3345 sizeof(struct drm_color_lut) * size,
@@ -3002,7 +3390,7 @@ retry:
3002 3390
3003 drm_property_unreference_blob(blob); 3391 drm_property_unreference_blob(blob);
3004 3392
3005 return; 3393 return 0;
3006fail: 3394fail:
3007 if (ret == -EDEADLK) 3395 if (ret == -EDEADLK)
3008 goto backoff; 3396 goto backoff;
@@ -3010,7 +3398,7 @@ fail:
3010 drm_atomic_state_free(state); 3398 drm_atomic_state_free(state);
3011 drm_property_unreference_blob(blob); 3399 drm_property_unreference_blob(blob);
3012 3400
3013 return; 3401 return ret;
3014backoff: 3402backoff:
3015 drm_atomic_state_clear(state); 3403 drm_atomic_state_clear(state);
3016 drm_atomic_legacy_backoff(state); 3404 drm_atomic_legacy_backoff(state);
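
With gamma_set now returning int and losing the start parameter, an atomic driver can plug the helper straight into its CRTC funcs. A sketch under that assumption (my_crtc_funcs is a made-up name; all helper callbacks referenced are existing exports):

	static const struct drm_crtc_funcs my_crtc_funcs = {
		.gamma_set		= drm_atomic_helper_legacy_gamma_set,
		.set_config		= drm_atomic_helper_set_config,
		.page_flip		= drm_atomic_helper_page_flip,
		.destroy		= drm_crtc_cleanup,
		.reset			= drm_atomic_helper_crtc_reset,
		.atomic_duplicate_state	= drm_atomic_helper_crtc_duplicate_state,
		.atomic_destroy_state	= drm_atomic_helper_crtc_destroy_state,
	};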
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 50d0baa06db0..4153e8a193af 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -30,25 +30,36 @@
30 30
31#include <drm/drmP.h> 31#include <drm/drmP.h>
32#include "drm_internal.h" 32#include "drm_internal.h"
33#include "drm_legacy.h"
33 34
34/** 35/**
35 * drm_getmagic - Get unique magic of a client 36 * DOC: master and authentication
36 * @dev: DRM device to operate on
37 * @data: ioctl data containing the drm_auth object
38 * @file_priv: DRM file that performs the operation
39 * 37 *
40 * This looks up the unique magic of the passed client and returns it. If the 38 * struct &drm_master is used to track groups of clients with open
41 * client did not have a magic assigned, yet, a new one is registered. The magic 39 * primary/legacy device nodes. For every struct &drm_file which has at
42 * is stored in the passed drm_auth object. 40 * least once successfully become the device master (either through the
41 * SET_MASTER IOCTL, or implicitly through opening the primary device node when
42 * no one else is the current master at that time) there exists one &drm_master.
43 * This is noted in the is_master member of &drm_file. All other clients have
44 * just a pointer to the &drm_master they are associated with.
43 * 45 *
44 * Returns: 0 on success, negative error code on failure. 46 * In addition, only one &drm_master can be the current master for a &drm_device.
47 * It can be switched through the DROP_MASTER and SET_MASTER IOCTLs, or
48 * implicitly through closing/opening the primary device node. See also
49 * drm_is_current_master().
50 *
51 * Clients can authenticate against the current master (if it matches their own)
52 * using the GETMAGIC and AUTHMAGIC IOCTLs. Together with exchanging masters,
53 * this allows controlled access to the device for an entire group of mutually
54 * trusted clients.
45 */ 55 */
56
46int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv) 57int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
47{ 58{
48 struct drm_auth *auth = data; 59 struct drm_auth *auth = data;
49 int ret = 0; 60 int ret = 0;
50 61
51 mutex_lock(&dev->struct_mutex); 62 mutex_lock(&dev->master_mutex);
52 if (!file_priv->magic) { 63 if (!file_priv->magic) {
53 ret = idr_alloc(&file_priv->master->magic_map, file_priv, 64 ret = idr_alloc(&file_priv->master->magic_map, file_priv,
54 1, 0, GFP_KERNEL); 65 1, 0, GFP_KERNEL);
@@ -56,23 +67,13 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
56 file_priv->magic = ret; 67 file_priv->magic = ret;
57 } 68 }
58 auth->magic = file_priv->magic; 69 auth->magic = file_priv->magic;
59 mutex_unlock(&dev->struct_mutex); 70 mutex_unlock(&dev->master_mutex);
60 71
61 DRM_DEBUG("%u\n", auth->magic); 72 DRM_DEBUG("%u\n", auth->magic);
62 73
63 return ret < 0 ? ret : 0; 74 return ret < 0 ? ret : 0;
64} 75}
65 76
66/**
67 * drm_authmagic - Authenticate client with a magic
68 * @dev: DRM device to operate on
69 * @data: ioctl data containing the drm_auth object
70 * @file_priv: DRM file that performs the operation
71 *
72 * This looks up a DRM client by the passed magic and authenticates it.
73 *
74 * Returns: 0 on success, negative error code on failure.
75 */
76int drm_authmagic(struct drm_device *dev, void *data, 77int drm_authmagic(struct drm_device *dev, void *data,
77 struct drm_file *file_priv) 78 struct drm_file *file_priv)
78{ 79{
@@ -81,13 +82,253 @@ int drm_authmagic(struct drm_device *dev, void *data,
81 82
82 DRM_DEBUG("%u\n", auth->magic); 83 DRM_DEBUG("%u\n", auth->magic);
83 84
84 mutex_lock(&dev->struct_mutex); 85 mutex_lock(&dev->master_mutex);
85 file = idr_find(&file_priv->master->magic_map, auth->magic); 86 file = idr_find(&file_priv->master->magic_map, auth->magic);
86 if (file) { 87 if (file) {
87 file->authenticated = 1; 88 file->authenticated = 1;
88 idr_replace(&file_priv->master->magic_map, NULL, auth->magic); 89 idr_replace(&file_priv->master->magic_map, NULL, auth->magic);
89 } 90 }
90 mutex_unlock(&dev->struct_mutex); 91 mutex_unlock(&dev->master_mutex);
91 92
92 return file ? 0 : -EINVAL; 93 return file ? 0 : -EINVAL;
93} 94}
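
For illustration, the userspace side of this GETMAGIC/AUTHMAGIC handshake via libdrm: the unauthenticated client fetches a magic and hands it to the current master (e.g. over a socket), which then authenticates it. The two wrapper function names are made up; drmGetMagic() and drmAuthMagic() are the real libdrm entry points for these IOCTLs.

	#include <xf86drm.h>

	/* client side: obtain a magic token for this fd */
	int client_get_token(int client_fd, drm_magic_t *magic)
	{
		return drmGetMagic(client_fd, magic);	/* DRM_IOCTL_GET_MAGIC */
	}

	/* master side: authenticate the client that sent us @magic */
	int master_grant_access(int master_fd, drm_magic_t magic)
	{
		return drmAuthMagic(master_fd, magic);	/* DRM_IOCTL_AUTH_MAGIC */
	}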
95
96static struct drm_master *drm_master_create(struct drm_device *dev)
97{
98 struct drm_master *master;
99
100 master = kzalloc(sizeof(*master), GFP_KERNEL);
101 if (!master)
102 return NULL;
103
104 kref_init(&master->refcount);
105 spin_lock_init(&master->lock.spinlock);
106 init_waitqueue_head(&master->lock.lock_queue);
107 idr_init(&master->magic_map);
108 master->dev = dev;
109
110 return master;
111}
112
113static int drm_set_master(struct drm_device *dev, struct drm_file *fpriv,
114 bool new_master)
115{
116 int ret = 0;
117
118 dev->master = drm_master_get(fpriv->master);
119 if (dev->driver->master_set) {
120 ret = dev->driver->master_set(dev, fpriv, new_master);
121 if (unlikely(ret != 0)) {
122 drm_master_put(&dev->master);
123 }
124 }
125
126 return ret;
127}
128
129static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
130{
131 struct drm_master *old_master;
132 int ret;
133
134 lockdep_assert_held_once(&dev->master_mutex);
135
136 old_master = fpriv->master;
137 fpriv->master = drm_master_create(dev);
138 if (!fpriv->master) {
139 fpriv->master = old_master;
140 return -ENOMEM;
141 }
142
143 if (dev->driver->master_create) {
144 ret = dev->driver->master_create(dev, fpriv->master);
145 if (ret)
146 goto out_err;
147 }
148 fpriv->is_master = 1;
149 fpriv->authenticated = 1;
150
151 ret = drm_set_master(dev, fpriv, true);
152 if (ret)
153 goto out_err;
154
155 if (old_master)
156 drm_master_put(&old_master);
157
158 return 0;
159
160out_err:
161 /* drop references and restore old master on failure */
162 drm_master_put(&fpriv->master);
163 fpriv->master = old_master;
164
165 return ret;
166}
167
168int drm_setmaster_ioctl(struct drm_device *dev, void *data,
169 struct drm_file *file_priv)
170{
171 int ret = 0;
172
173 mutex_lock(&dev->master_mutex);
174 if (drm_is_current_master(file_priv))
175 goto out_unlock;
176
177 if (dev->master) {
178 ret = -EINVAL;
179 goto out_unlock;
180 }
181
182 if (!file_priv->master) {
183 ret = -EINVAL;
184 goto out_unlock;
185 }
186
187 if (!file_priv->is_master) {
188 ret = drm_new_set_master(dev, file_priv);
189 goto out_unlock;
190 }
191
192 ret = drm_set_master(dev, file_priv, false);
193out_unlock:
194 mutex_unlock(&dev->master_mutex);
195 return ret;
196}
197
198static void drm_drop_master(struct drm_device *dev,
199 struct drm_file *fpriv)
200{
201 if (dev->driver->master_drop)
202 dev->driver->master_drop(dev, fpriv);
203 drm_master_put(&dev->master);
204}
205
206int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
207 struct drm_file *file_priv)
208{
209 int ret = -EINVAL;
210
211 mutex_lock(&dev->master_mutex);
212 if (!drm_is_current_master(file_priv))
213 goto out_unlock;
214
215 if (!dev->master)
216 goto out_unlock;
217
218 ret = 0;
219 drm_drop_master(dev, file_priv);
220out_unlock:
221 mutex_unlock(&dev->master_mutex);
222 return ret;
223}
224
225int drm_master_open(struct drm_file *file_priv)
226{
227 struct drm_device *dev = file_priv->minor->dev;
228 int ret = 0;
229
230 /* if there is no current master, make this fd the master, but do not
231 * create any master object for render clients */
232 mutex_lock(&dev->master_mutex);
233 if (!dev->master)
234 ret = drm_new_set_master(dev, file_priv);
235 else
236 file_priv->master = drm_master_get(dev->master);
237 mutex_unlock(&dev->master_mutex);
238
239 return ret;
240}
241
242void drm_master_release(struct drm_file *file_priv)
243{
244 struct drm_device *dev = file_priv->minor->dev;
245 struct drm_master *master = file_priv->master;
246
247 mutex_lock(&dev->master_mutex);
248 if (file_priv->magic)
249 idr_remove(&file_priv->master->magic_map, file_priv->magic);
250
251 if (!drm_is_current_master(file_priv))
252 goto out;
253
254 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
255 /*
256 * Since the master is disappearing, so is the
257 * ability to take the lock.
258 */
259 mutex_lock(&dev->struct_mutex);
260 if (master->lock.hw_lock) {
261 if (dev->sigdata.lock == master->lock.hw_lock)
262 dev->sigdata.lock = NULL;
263 master->lock.hw_lock = NULL;
264 master->lock.file_priv = NULL;
265 wake_up_interruptible_all(&master->lock.lock_queue);
266 }
267 mutex_unlock(&dev->struct_mutex);
268 }
269
270 if (dev->master == file_priv->master)
271 drm_drop_master(dev, file_priv);
272out:
273 /* drop the master reference held by the file priv */
274 if (file_priv->master)
275 drm_master_put(&file_priv->master);
276 mutex_unlock(&dev->master_mutex);
277}
278
279/**
280 * drm_is_current_master - checks whether @fpriv is the current master
281 * @fpriv: DRM file private
282 *
283 * Checks whether @fpriv is the current master on its device. This decides
284 * whether a client is allowed to run DRM_MASTER IOCTLs.
285 *
286 * Most of the modern IOCTLs which require DRM_MASTER are for kernel modesetting
287 * - the current master is assumed to own the non-shareable display hardware.
288 */
289bool drm_is_current_master(struct drm_file *fpriv)
290{
291 return fpriv->is_master && fpriv->master == fpriv->minor->dev->master;
292}
293EXPORT_SYMBOL(drm_is_current_master);
294
295/**
296 * drm_master_get - reference a master pointer
297 * @master: struct &drm_master
298 *
299 * Increments the reference count of @master and returns a pointer to @master.
300 */
301struct drm_master *drm_master_get(struct drm_master *master)
302{
303 kref_get(&master->refcount);
304 return master;
305}
306EXPORT_SYMBOL(drm_master_get);
307
308static void drm_master_destroy(struct kref *kref)
309{
310 struct drm_master *master = container_of(kref, struct drm_master, refcount);
311 struct drm_device *dev = master->dev;
312
313 if (dev->driver->master_destroy)
314 dev->driver->master_destroy(dev, master);
315
316 drm_legacy_master_rmmaps(dev, master);
317
318 idr_destroy(&master->magic_map);
319 kfree(master->unique);
320 kfree(master);
321}
322
323/**
324 * drm_master_put - unreference and clear a master pointer
325 * @master: pointer to a pointer of struct &drm_master
326 *
327 * This decrements the &drm_master behind @master and sets it to NULL.
328 */
329void drm_master_put(struct drm_master **master)
330{
331 kref_put(&(*master)->refcount, drm_master_destroy);
332 *master = NULL;
333}
334EXPORT_SYMBOL(drm_master_put);
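
The refcounting contract in miniature (caller code assumed, not from the patch): every drm_master_get() must be balanced by exactly one drm_master_put(), which also NULLs the caller's pointer so a stale reference cannot be reused.

	static void my_take_and_release(struct drm_file *file_priv)
	{
		struct drm_master *master;

		/* +1 ref; in real code file_priv->master access is
		 * serialized by dev->master_mutex */
		master = drm_master_get(file_priv->master);

		/* ... use master->unique, master->magic_map, ... */

		drm_master_put(&master);	/* -1 ref, master is now NULL */
	}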
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index b3654404abd0..255543086590 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -36,7 +36,7 @@
36 * encoder chain. 36 * encoder chain.
37 * 37 *
38 * A bridge is always attached to a single &drm_encoder at a time, but can be 38 * A bridge is always attached to a single &drm_encoder at a time, but can be
39 * either connected to it directly, or through an intermediate bridge: 39 * either connected to it directly, or through an intermediate bridge::
40 * 40 *
41 * encoder ---> bridge B ---> bridge A 41 * encoder ---> bridge B ---> bridge A
42 * 42 *
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 9b34158c0f77..c3a12cd8bd0d 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -51,7 +51,7 @@ static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
51 */ 51 */
52 if (!entry->map || 52 if (!entry->map ||
53 map->type != entry->map->type || 53 map->type != entry->map->type ||
54 entry->master != dev->primary->master) 54 entry->master != dev->master)
55 continue; 55 continue;
56 switch (map->type) { 56 switch (map->type) {
57 case _DRM_SHM: 57 case _DRM_SHM:
@@ -245,12 +245,12 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
245 map->offset = (unsigned long)map->handle; 245 map->offset = (unsigned long)map->handle;
246 if (map->flags & _DRM_CONTAINS_LOCK) { 246 if (map->flags & _DRM_CONTAINS_LOCK) {
247 /* Prevent a 2nd X Server from creating a 2nd lock */ 247 /* Prevent a 2nd X Server from creating a 2nd lock */
248 if (dev->primary->master->lock.hw_lock != NULL) { 248 if (dev->master->lock.hw_lock != NULL) {
249 vfree(map->handle); 249 vfree(map->handle);
250 kfree(map); 250 kfree(map);
251 return -EBUSY; 251 return -EBUSY;
252 } 252 }
253 dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle; /* Pointer to lock */ 253 dev->sigdata.lock = dev->master->lock.hw_lock = map->handle; /* Pointer to lock */
254 } 254 }
255 break; 255 break;
256 case _DRM_AGP: { 256 case _DRM_AGP: {
@@ -356,7 +356,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
356 mutex_unlock(&dev->struct_mutex); 356 mutex_unlock(&dev->struct_mutex);
357 357
358 if (!(map->flags & _DRM_DRIVER)) 358 if (!(map->flags & _DRM_DRIVER))
359 list->master = dev->primary->master; 359 list->master = dev->master;
360 *maplist = list; 360 *maplist = list;
361 return 0; 361 return 0;
362} 362}
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index d2a6d958ca76..fd93e9c79d28 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -39,6 +39,7 @@
39#include <drm/drm_fourcc.h> 39#include <drm/drm_fourcc.h>
40#include <drm/drm_modeset_lock.h> 40#include <drm/drm_modeset_lock.h>
41#include <drm/drm_atomic.h> 41#include <drm/drm_atomic.h>
42#include <drm/drm_auth.h>
42 43
43#include "drm_crtc_internal.h" 44#include "drm_crtc_internal.h"
44#include "drm_internal.h" 45#include "drm_internal.h"
@@ -239,37 +240,6 @@ const char *drm_get_subpixel_order_name(enum subpixel_order order)
239} 240}
240EXPORT_SYMBOL(drm_get_subpixel_order_name); 241EXPORT_SYMBOL(drm_get_subpixel_order_name);
241 242
242static char printable_char(int c)
243{
244 return isascii(c) && isprint(c) ? c : '?';
245}
246
247/**
248 * drm_get_format_name - return a string for drm fourcc format
249 * @format: format to compute name of
250 *
251 * Note that the buffer used by this function is globally shared and owned by
252 * the function itself.
253 *
254 * FIXME: This isn't really multithreading safe.
255 */
256const char *drm_get_format_name(uint32_t format)
257{
258 static char buf[32];
259
260 snprintf(buf, sizeof(buf),
261 "%c%c%c%c %s-endian (0x%08x)",
262 printable_char(format & 0xff),
263 printable_char((format >> 8) & 0xff),
264 printable_char((format >> 16) & 0xff),
265 printable_char((format >> 24) & 0x7f),
266 format & DRM_FORMAT_BIG_ENDIAN ? "big" : "little",
267 format);
268
269 return buf;
270}
271EXPORT_SYMBOL(drm_get_format_name);
272
273/* 243/*
274 * Internal function to assign a slot in the object idr and optionally 244 * Internal function to assign a slot in the object idr and optionally
275 * register the object into the idr. 245 * register the object into the idr.
@@ -535,7 +505,7 @@ EXPORT_SYMBOL(drm_framebuffer_unregister_private);
535 * 505 *
536 * Cleanup framebuffer. This function is intended to be used from the drivers 506 * Cleanup framebuffer. This function is intended to be used from the drivers
537 * ->destroy callback. It can also be used to clean up driver private 507 * ->destroy callback. It can also be used to clean up driver private
538 * framebuffers embedded into a larger structure. 508 * framebuffers embedded into a larger structure.
539 * 509 *
540 * Note that this function does not remove the fb from active usage - if it is 510 * Note that this function does not remove the fb from active usage - if it is
541 * still used anywhere, hilarity can ensue since userspace could call getfb on 511 * still used anywhere, hilarity can ensue since userspace could call getfb on
@@ -639,6 +609,31 @@ static unsigned int drm_num_crtcs(struct drm_device *dev)
639 return num; 609 return num;
640} 610}
641 611
612static int drm_crtc_register_all(struct drm_device *dev)
613{
614 struct drm_crtc *crtc;
615 int ret = 0;
616
617 drm_for_each_crtc(crtc, dev) {
618 if (crtc->funcs->late_register)
619 ret = crtc->funcs->late_register(crtc);
620 if (ret)
621 return ret;
622 }
623
624 return 0;
625}
626
627static void drm_crtc_unregister_all(struct drm_device *dev)
628{
629 struct drm_crtc *crtc;
630
631 drm_for_each_crtc(crtc, dev) {
632 if (crtc->funcs->early_unregister)
633 crtc->funcs->early_unregister(crtc);
634 }
635}
636
642/** 637/**
643 * drm_crtc_init_with_planes - Initialise a new CRTC object with 638 * drm_crtc_init_with_planes - Initialise a new CRTC object with
644 * specified primary and cursor planes. 639 * specified primary and cursor planes.
@@ -669,6 +664,9 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
669 crtc->dev = dev; 664 crtc->dev = dev;
670 crtc->funcs = funcs; 665 crtc->funcs = funcs;
671 666
667 INIT_LIST_HEAD(&crtc->commit_list);
668 spin_lock_init(&crtc->commit_lock);
669
672 drm_modeset_lock_init(&crtc->mutex); 670 drm_modeset_lock_init(&crtc->mutex);
673 ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC); 671 ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
674 if (ret) 672 if (ret)
@@ -692,7 +690,7 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
692 crtc->base.properties = &crtc->properties; 690 crtc->base.properties = &crtc->properties;
693 691
694 list_add_tail(&crtc->head, &config->crtc_list); 692 list_add_tail(&crtc->head, &config->crtc_list);
695 config->num_crtc++; 693 crtc->index = config->num_crtc++;
696 694
697 crtc->primary = primary; 695 crtc->primary = primary;
698 crtc->cursor = cursor; 696 crtc->cursor = cursor;
@@ -722,6 +720,11 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
722{ 720{
723 struct drm_device *dev = crtc->dev; 721 struct drm_device *dev = crtc->dev;
724 722
723 /* Note that the crtc_list is considered to be static; should we
724 * remove the drm_crtc at runtime we would have to decrement all
725 * the indices on the drm_crtc after us in the crtc_list.
726 */
727
725 kfree(crtc->gamma_store); 728 kfree(crtc->gamma_store);
726 crtc->gamma_store = NULL; 729 crtc->gamma_store = NULL;
727 730
@@ -741,29 +744,6 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
741} 744}
742EXPORT_SYMBOL(drm_crtc_cleanup); 745EXPORT_SYMBOL(drm_crtc_cleanup);
743 746
744/**
745 * drm_crtc_index - find the index of a registered CRTC
746 * @crtc: CRTC to find index for
747 *
748 * Given a registered CRTC, return the index of that CRTC within a DRM
749 * device's list of CRTCs.
750 */
751unsigned int drm_crtc_index(struct drm_crtc *crtc)
752{
753 unsigned int index = 0;
754 struct drm_crtc *tmp;
755
756 drm_for_each_crtc(tmp, crtc->dev) {
757 if (tmp == crtc)
758 return index;
759
760 index++;
761 }
762
763 BUG();
764}
765EXPORT_SYMBOL(drm_crtc_index);
766
767/* 747/*
768 * drm_mode_remove - remove and free a mode 748 * drm_mode_remove - remove and free a mode
769 * @connector: connector list to modify 749 * @connector: connector list to modify
@@ -984,6 +964,12 @@ void drm_connector_cleanup(struct drm_connector *connector)
984 struct drm_device *dev = connector->dev; 964 struct drm_device *dev = connector->dev;
985 struct drm_display_mode *mode, *t; 965 struct drm_display_mode *mode, *t;
986 966
967 /* The connector should have been removed from userspace long before
968 * it is finally destroyed.
969 */
970 if (WARN_ON(connector->registered))
971 drm_connector_unregister(connector);
972
987 if (connector->tile_group) { 973 if (connector->tile_group) {
988 drm_mode_put_tile_group(dev, connector->tile_group); 974 drm_mode_put_tile_group(dev, connector->tile_group);
989 connector->tile_group = NULL; 975 connector->tile_group = NULL;
@@ -1030,19 +1016,34 @@ int drm_connector_register(struct drm_connector *connector)
1030{ 1016{
1031 int ret; 1017 int ret;
1032 1018
1019 if (connector->registered)
1020 return 0;
1021
1033 ret = drm_sysfs_connector_add(connector); 1022 ret = drm_sysfs_connector_add(connector);
1034 if (ret) 1023 if (ret)
1035 return ret; 1024 return ret;
1036 1025
1037 ret = drm_debugfs_connector_add(connector); 1026 ret = drm_debugfs_connector_add(connector);
1038 if (ret) { 1027 if (ret) {
1039 drm_sysfs_connector_remove(connector); 1028 goto err_sysfs;
1040 return ret; 1029 }
1030
1031 if (connector->funcs->late_register) {
1032 ret = connector->funcs->late_register(connector);
1033 if (ret)
1034 goto err_debugfs;
1041 } 1035 }
1042 1036
1043 drm_mode_object_register(connector->dev, &connector->base); 1037 drm_mode_object_register(connector->dev, &connector->base);
1044 1038
1039 connector->registered = true;
1045 return 0; 1040 return 0;
1041
1042err_debugfs:
1043 drm_debugfs_connector_remove(connector);
1044err_sysfs:
1045 drm_sysfs_connector_remove(connector);
1046 return ret;
1046} 1047}
1047EXPORT_SYMBOL(drm_connector_register); 1048EXPORT_SYMBOL(drm_connector_register);
1048 1049
@@ -1054,8 +1055,16 @@ EXPORT_SYMBOL(drm_connector_register);
1054 */ 1055 */
1055void drm_connector_unregister(struct drm_connector *connector) 1056void drm_connector_unregister(struct drm_connector *connector)
1056{ 1057{
1058 if (!connector->registered)
1059 return;
1060
1061 if (connector->funcs->early_unregister)
1062 connector->funcs->early_unregister(connector);
1063
1057 drm_sysfs_connector_remove(connector); 1064 drm_sysfs_connector_remove(connector);
1058 drm_debugfs_connector_remove(connector); 1065 drm_debugfs_connector_remove(connector);
1066
1067 connector->registered = false;
1059} 1068}
1060EXPORT_SYMBOL(drm_connector_unregister); 1069EXPORT_SYMBOL(drm_connector_unregister);
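
With the new ->registered flag both calls are now idempotent, so a hotplug path can invoke them unconditionally without tracking registration state itself. A sketch under that assumption (the my_dp_mst_hotplug name is hypothetical):

	static void my_dp_mst_hotplug(struct drm_connector *connector,
				      bool plugged)
	{
		if (plugged)
			WARN_ON(drm_connector_register(connector));
		else
			/* harmless no-op if the connector was never registered */
			drm_connector_unregister(connector);
	}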
1061 1070
@@ -1064,9 +1073,9 @@ EXPORT_SYMBOL(drm_connector_unregister);
1064 * @dev: drm device 1073 * @dev: drm device
1065 * 1074 *
1066 * This function registers all connectors in sysfs and other places so that 1075 * This function registers all connectors in sysfs and other places so that
1067 * userspace can start to access them. Drivers can call it after calling 1076 * userspace can start to access them. drm_connector_register_all() is called
1068 * drm_dev_register() to complete the device registration, if they don't call 1077 * automatically from drm_dev_register() to complete the device registration,
1069 * drm_connector_register() on each connector individually. 1078 * if drivers don't call drm_connector_register() on each connector individually.
1070 * 1079 *
1071 * When a device is unplugged and should be removed from userspace access, 1080 * When a device is unplugged and should be removed from userspace access,
1072 * call drm_connector_unregister_all(), which is the inverse of this 1081 * call drm_connector_unregister_all(), which is the inverse of this
@@ -1119,6 +1128,31 @@ void drm_connector_unregister_all(struct drm_device *dev)
1119} 1128}
1120EXPORT_SYMBOL(drm_connector_unregister_all); 1129EXPORT_SYMBOL(drm_connector_unregister_all);
1121 1130
1131static int drm_encoder_register_all(struct drm_device *dev)
1132{
1133 struct drm_encoder *encoder;
1134 int ret = 0;
1135
1136 drm_for_each_encoder(encoder, dev) {
1137 if (encoder->funcs->late_register)
1138 ret = encoder->funcs->late_register(encoder);
1139 if (ret)
1140 return ret;
1141 }
1142
1143 return 0;
1144}
1145
1146static void drm_encoder_unregister_all(struct drm_device *dev)
1147{
1148 struct drm_encoder *encoder;
1149
1150 drm_for_each_encoder(encoder, dev) {
1151 if (encoder->funcs->early_unregister)
1152 encoder->funcs->early_unregister(encoder);
1153 }
1154}
1155
1122/** 1156/**
1123 * drm_encoder_init - Init a preallocated encoder 1157 * drm_encoder_init - Init a preallocated encoder
1124 * @dev: drm device 1158 * @dev: drm device
@@ -1166,7 +1200,7 @@ int drm_encoder_init(struct drm_device *dev,
1166 } 1200 }
1167 1201
1168 list_add_tail(&encoder->head, &dev->mode_config.encoder_list); 1202 list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
1169 dev->mode_config.num_encoder++; 1203 encoder->index = dev->mode_config.num_encoder++;
1170 1204
1171out_put: 1205out_put:
1172 if (ret) 1206 if (ret)
@@ -1180,29 +1214,6 @@ out_unlock:
1180EXPORT_SYMBOL(drm_encoder_init); 1214EXPORT_SYMBOL(drm_encoder_init);
1181 1215
1182/** 1216/**
1183 * drm_encoder_index - find the index of a registered encoder
1184 * @encoder: encoder to find index for
1185 *
1186 * Given a registered encoder, return the index of that encoder within a DRM
1187 * device's list of encoders.
1188 */
1189unsigned int drm_encoder_index(struct drm_encoder *encoder)
1190{
1191 unsigned int index = 0;
1192 struct drm_encoder *tmp;
1193
1194 drm_for_each_encoder(tmp, encoder->dev) {
1195 if (tmp == encoder)
1196 return index;
1197
1198 index++;
1199 }
1200
1201 BUG();
1202}
1203EXPORT_SYMBOL(drm_encoder_index);
1204
1205/**
1206 * drm_encoder_cleanup - cleans up an initialised encoder 1217 * drm_encoder_cleanup - cleans up an initialised encoder
1207 * @encoder: encoder to cleanup 1218 * @encoder: encoder to cleanup
1208 * 1219 *
@@ -1212,6 +1223,11 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
1212{ 1223{
1213 struct drm_device *dev = encoder->dev; 1224 struct drm_device *dev = encoder->dev;
1214 1225
1226 /* Note that the encoder_list is considered to be static; should we
1227 * remove the drm_encoder at runtime we would have to decrement all
1228 * the indices on the drm_encoder after us in the encoder_list.
1229 */
1230
1215 drm_modeset_lock_all(dev); 1231 drm_modeset_lock_all(dev);
1216 drm_mode_object_unregister(dev, &encoder->base); 1232 drm_mode_object_unregister(dev, &encoder->base);
1217 kfree(encoder->name); 1233 kfree(encoder->name);
@@ -1300,7 +1316,7 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
1300 plane->type = type; 1316 plane->type = type;
1301 1317
1302 list_add_tail(&plane->head, &config->plane_list); 1318 list_add_tail(&plane->head, &config->plane_list);
1303 config->num_total_plane++; 1319 plane->index = config->num_total_plane++;
1304 if (plane->type == DRM_PLANE_TYPE_OVERLAY) 1320 if (plane->type == DRM_PLANE_TYPE_OVERLAY)
1305 config->num_overlay_plane++; 1321 config->num_overlay_plane++;
1306 1322
@@ -1325,6 +1341,31 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
1325} 1341}
1326EXPORT_SYMBOL(drm_universal_plane_init); 1342EXPORT_SYMBOL(drm_universal_plane_init);
1327 1343
1344static int drm_plane_register_all(struct drm_device *dev)
1345{
1346 struct drm_plane *plane;
1347 int ret = 0;
1348
1349 drm_for_each_plane(plane, dev) {
1350 if (plane->funcs->late_register)
1351 ret = plane->funcs->late_register(plane);
1352 if (ret)
1353 return ret;
1354 }
1355
1356 return 0;
1357}
1358
1359static void drm_plane_unregister_all(struct drm_device *dev)
1360{
1361 struct drm_plane *plane;
1362
1363 drm_for_each_plane(plane, dev) {
1364 if (plane->funcs->early_unregister)
1365 plane->funcs->early_unregister(plane);
1366 }
1367}
1368
1328/** 1369/**
1329 * drm_plane_init - Initialize a legacy plane 1370 * drm_plane_init - Initialize a legacy plane
1330 * @dev: DRM device 1371 * @dev: DRM device
@@ -1374,6 +1415,11 @@ void drm_plane_cleanup(struct drm_plane *plane)
1374 1415
1375 BUG_ON(list_empty(&plane->head)); 1416 BUG_ON(list_empty(&plane->head));
1376 1417
1418 /* Note that the plane_list is considered to be static; should we
1419 * remove the drm_plane at runtime we would have to decrement all
1420 * the indices on the drm_plane after us in the plane_list.
1421 */
1422
1377 list_del(&plane->head); 1423 list_del(&plane->head);
1378 dev->mode_config.num_total_plane--; 1424 dev->mode_config.num_total_plane--;
1379 if (plane->type == DRM_PLANE_TYPE_OVERLAY) 1425 if (plane->type == DRM_PLANE_TYPE_OVERLAY)
@@ -1391,29 +1437,6 @@ void drm_plane_cleanup(struct drm_plane *plane)
1391EXPORT_SYMBOL(drm_plane_cleanup); 1437EXPORT_SYMBOL(drm_plane_cleanup);
1392 1438
1393/** 1439/**
1394 * drm_plane_index - find the index of a registered plane
1395 * @plane: plane to find index for
1396 *
1397 * Given a registered plane, return the index of that plane within a DRM
1398 * device's list of planes.
1399 */
1400unsigned int drm_plane_index(struct drm_plane *plane)
1401{
1402 unsigned int index = 0;
1403 struct drm_plane *tmp;
1404
1405 drm_for_each_plane(tmp, plane->dev) {
1406 if (tmp == plane)
1407 return index;
1408
1409 index++;
1410 }
1411
1412 BUG();
1413}
1414EXPORT_SYMBOL(drm_plane_index);
1415
1416/**
1417 * drm_plane_from_index - find the registered plane at an index 1440 * drm_plane_from_index - find the registered plane at an index
1418 * @dev: DRM device 1441 * @dev: DRM device
1419 * @idx: index of registered plane to find for 1442 * @idx: index of registered plane to find for
@@ -1425,13 +1448,11 @@ struct drm_plane *
1425drm_plane_from_index(struct drm_device *dev, int idx) 1448drm_plane_from_index(struct drm_device *dev, int idx)
1426{ 1449{
1427 struct drm_plane *plane; 1450 struct drm_plane *plane;
1428 unsigned int i = 0;
1429 1451
1430 drm_for_each_plane(plane, dev) { 1452 drm_for_each_plane(plane, dev)
1431 if (i == idx) 1453 if (idx == plane->index)
1432 return plane; 1454 return plane;
1433 i++; 1455
1434 }
1435 return NULL; 1456 return NULL;
1436} 1457}
1437EXPORT_SYMBOL(drm_plane_from_index); 1458EXPORT_SYMBOL(drm_plane_from_index);
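
Because the index is now assigned once at init time and stored in plane->index, lookups that used to walk the plane list become O(1). A sketch of the kind of bookkeeping this enables (driver code assumed; the header-side accessor is not shown in this hunk):

	static void my_mark_plane_busy(struct drm_plane *plane,
				       unsigned long *busy_mask)
	{
		/* plane->index is fixed at init, so this is a constant-time
		 * bitmask update instead of a list walk */
		set_bit(plane->index, busy_mask);
	}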
@@ -1467,6 +1488,46 @@ void drm_plane_force_disable(struct drm_plane *plane)
1467} 1488}
1468EXPORT_SYMBOL(drm_plane_force_disable); 1489EXPORT_SYMBOL(drm_plane_force_disable);
1469 1490
1491int drm_modeset_register_all(struct drm_device *dev)
1492{
1493 int ret;
1494
1495 ret = drm_plane_register_all(dev);
1496 if (ret)
1497 goto err_plane;
1498
1499 ret = drm_crtc_register_all(dev);
1500 if (ret)
1501 goto err_crtc;
1502
1503 ret = drm_encoder_register_all(dev);
1504 if (ret)
1505 goto err_encoder;
1506
1507 ret = drm_connector_register_all(dev);
1508 if (ret)
1509 goto err_connector;
1510
1511 return 0;
1512
1513err_connector:
1514 drm_encoder_unregister_all(dev);
1515err_encoder:
1516 drm_crtc_unregister_all(dev);
1517err_crtc:
1518 drm_plane_unregister_all(dev);
1519err_plane:
1520 return ret;
1521}
1522
1523void drm_modeset_unregister_all(struct drm_device *dev)
1524{
1525 drm_connector_unregister_all(dev);
1526 drm_encoder_unregister_all(dev);
1527 drm_crtc_unregister_all(dev);
1528 drm_plane_unregister_all(dev);
1529}
1530
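
A sketch of the new hook pair from the driver side (names and helpers hypothetical): anything that makes the object visible to userspace, such as debugfs files, goes into ->late_register and is torn down again in ->early_unregister, so it is bracketed by drm_modeset_register_all()/drm_modeset_unregister_all().

	static int my_crtc_late_register(struct drm_crtc *crtc)
	{
		/* e.g. create debugfs files under the minor's root */
		return my_crtc_debugfs_init(crtc);	/* hypothetical helper */
	}

	static void my_crtc_early_unregister(struct drm_crtc *crtc)
	{
		my_crtc_debugfs_cleanup(crtc);		/* hypothetical helper */
	}

	static const struct drm_crtc_funcs my_reg_crtc_funcs = {
		/* ... */
		.late_register	  = my_crtc_late_register,
		.early_unregister = my_crtc_early_unregister,
	};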
1470static int drm_mode_create_standard_properties(struct drm_device *dev) 1531static int drm_mode_create_standard_properties(struct drm_device *dev)
1471{ 1532{
1472 struct drm_property *prop; 1533 struct drm_property *prop;
@@ -2821,8 +2882,6 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
2821 goto out; 2882 goto out;
2822 } 2883 }
2823 2884
2824 drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
2825
2826 /* 2885 /*
2827 * Check whether the primary plane supports the fb pixel format. 2886 * Check whether the primary plane supports the fb pixel format.
2828 * Drivers not implementing the universal planes API use a 2887 * Drivers not implementing the universal planes API use a
@@ -2977,6 +3036,8 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
2977 DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n"); 3036 DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n");
2978 return PTR_ERR(fb); 3037 return PTR_ERR(fb);
2979 } 3038 }
3039 fb->hot_x = req->hot_x;
3040 fb->hot_y = req->hot_y;
2980 } else { 3041 } else {
2981 fb = NULL; 3042 fb = NULL;
2982 } 3043 }
@@ -3583,7 +3644,7 @@ int drm_mode_getfb(struct drm_device *dev,
3583 r->bpp = fb->bits_per_pixel; 3644 r->bpp = fb->bits_per_pixel;
3584 r->pitch = fb->pitches[0]; 3645 r->pitch = fb->pitches[0];
3585 if (fb->funcs->create_handle) { 3646 if (fb->funcs->create_handle) {
3586 if (file_priv->is_master || capable(CAP_SYS_ADMIN) || 3647 if (drm_is_current_master(file_priv) || capable(CAP_SYS_ADMIN) ||
3587 drm_is_control_client(file_priv)) { 3648 drm_is_control_client(file_priv)) {
3588 ret = fb->funcs->create_handle(fb, file_priv, 3649 ret = fb->funcs->create_handle(fb, file_priv,
3589 &r->handle); 3650 &r->handle);
@@ -3740,6 +3801,13 @@ void drm_fb_release(struct drm_file *priv)
3740 } 3801 }
3741} 3802}
3742 3803
3804static bool drm_property_type_valid(struct drm_property *property)
3805{
3806 if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
3807 return !(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
3808 return !!(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
3809}
3810
3743/** 3811/**
3744 * drm_property_create - create a new property type 3812 * drm_property_create - create a new property type
3745 * @dev: drm device 3813 * @dev: drm device
@@ -4841,7 +4909,8 @@ bool drm_property_change_valid_get(struct drm_property *property,
4841 if (value == 0) 4909 if (value == 0)
4842 return true; 4910 return true;
4843 4911
4844 return _object_find(property->dev, value, property->values[0]) != NULL; 4912 *ref = _object_find(property->dev, value, property->values[0]);
4913 return *ref != NULL;
4845 } 4914 }
4846 4915
4847 for (i = 0; i < property->num_values; i++) 4916 for (i = 0; i < property->num_values; i++)
@@ -5139,6 +5208,9 @@ EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
5139int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, 5208int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
5140 int gamma_size) 5209 int gamma_size)
5141{ 5210{
5211 uint16_t *r_base, *g_base, *b_base;
5212 int i;
5213
5142 crtc->gamma_size = gamma_size; 5214 crtc->gamma_size = gamma_size;
5143 5215
5144 crtc->gamma_store = kcalloc(gamma_size, sizeof(uint16_t) * 3, 5216 crtc->gamma_store = kcalloc(gamma_size, sizeof(uint16_t) * 3,
@@ -5148,6 +5220,16 @@ int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
5148 return -ENOMEM; 5220 return -ENOMEM;
5149 } 5221 }
5150 5222
5223 r_base = crtc->gamma_store;
5224 g_base = r_base + gamma_size;
5225 b_base = g_base + gamma_size;
5226 for (i = 0; i < gamma_size; i++) {
5227 r_base[i] = i << 8;
5228 g_base[i] = i << 8;
5229 b_base[i] = i << 8;
5230 }
5231
5232
5151 return 0; 5233 return 0;
5152} 5234}
5153EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size); 5235EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
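
The new default fills the gamma store with a linear identity ramp. As an illustration, with the common gamma_size of 256, entry i of each channel becomes i << 8 == i * 256, i.e. 0, 256, 512, ..., 65280, spanning the 16-bit range in equal steps:

	/* quick check of the default ramp (illustration only) */
	for (i = 0; i < 256; i++)
		WARN_ON((i << 8) != i * 256);	/* entry 128 -> 32768, mid-scale */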
@@ -5215,7 +5297,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
5215 goto out; 5297 goto out;
5216 } 5298 }
5217 5299
5218 crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size); 5300 ret = crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size);
5219 5301
5220out: 5302out:
5221 drm_modeset_unlock_all(dev); 5303 drm_modeset_unlock_all(dev);
@@ -5545,264 +5627,6 @@ int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
5545} 5627}
5546 5628
5547/** 5629/**
5548 * drm_fb_get_bpp_depth - get the bpp/depth values for format
5549 * @format: pixel format (DRM_FORMAT_*)
5550 * @depth: storage for the depth value
5551 * @bpp: storage for the bpp value
5552 *
5553 * This only supports RGB formats here for compat with code that doesn't use
5554 * pixel formats directly yet.
5555 */
5556void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
5557 int *bpp)
5558{
5559 switch (format) {
5560 case DRM_FORMAT_C8:
5561 case DRM_FORMAT_RGB332:
5562 case DRM_FORMAT_BGR233:
5563 *depth = 8;
5564 *bpp = 8;
5565 break;
5566 case DRM_FORMAT_XRGB1555:
5567 case DRM_FORMAT_XBGR1555:
5568 case DRM_FORMAT_RGBX5551:
5569 case DRM_FORMAT_BGRX5551:
5570 case DRM_FORMAT_ARGB1555:
5571 case DRM_FORMAT_ABGR1555:
5572 case DRM_FORMAT_RGBA5551:
5573 case DRM_FORMAT_BGRA5551:
5574 *depth = 15;
5575 *bpp = 16;
5576 break;
5577 case DRM_FORMAT_RGB565:
5578 case DRM_FORMAT_BGR565:
5579 *depth = 16;
5580 *bpp = 16;
5581 break;
5582 case DRM_FORMAT_RGB888:
5583 case DRM_FORMAT_BGR888:
5584 *depth = 24;
5585 *bpp = 24;
5586 break;
5587 case DRM_FORMAT_XRGB8888:
5588 case DRM_FORMAT_XBGR8888:
5589 case DRM_FORMAT_RGBX8888:
5590 case DRM_FORMAT_BGRX8888:
5591 *depth = 24;
5592 *bpp = 32;
5593 break;
5594 case DRM_FORMAT_XRGB2101010:
5595 case DRM_FORMAT_XBGR2101010:
5596 case DRM_FORMAT_RGBX1010102:
5597 case DRM_FORMAT_BGRX1010102:
5598 case DRM_FORMAT_ARGB2101010:
5599 case DRM_FORMAT_ABGR2101010:
5600 case DRM_FORMAT_RGBA1010102:
5601 case DRM_FORMAT_BGRA1010102:
5602 *depth = 30;
5603 *bpp = 32;
5604 break;
5605 case DRM_FORMAT_ARGB8888:
5606 case DRM_FORMAT_ABGR8888:
5607 case DRM_FORMAT_RGBA8888:
5608 case DRM_FORMAT_BGRA8888:
5609 *depth = 32;
5610 *bpp = 32;
5611 break;
5612 default:
5613 DRM_DEBUG_KMS("unsupported pixel format %s\n",
5614 drm_get_format_name(format));
5615 *depth = 0;
5616 *bpp = 0;
5617 break;
5618 }
5619}
5620EXPORT_SYMBOL(drm_fb_get_bpp_depth);
5621
5622/**
5623 * drm_format_num_planes - get the number of planes for format
5624 * @format: pixel format (DRM_FORMAT_*)
5625 *
5626 * Returns:
5627 * The number of planes used by the specified pixel format.
5628 */
5629int drm_format_num_planes(uint32_t format)
5630{
5631 switch (format) {
5632 case DRM_FORMAT_YUV410:
5633 case DRM_FORMAT_YVU410:
5634 case DRM_FORMAT_YUV411:
5635 case DRM_FORMAT_YVU411:
5636 case DRM_FORMAT_YUV420:
5637 case DRM_FORMAT_YVU420:
5638 case DRM_FORMAT_YUV422:
5639 case DRM_FORMAT_YVU422:
5640 case DRM_FORMAT_YUV444:
5641 case DRM_FORMAT_YVU444:
5642 return 3;
5643 case DRM_FORMAT_NV12:
5644 case DRM_FORMAT_NV21:
5645 case DRM_FORMAT_NV16:
5646 case DRM_FORMAT_NV61:
5647 case DRM_FORMAT_NV24:
5648 case DRM_FORMAT_NV42:
5649 return 2;
5650 default:
5651 return 1;
5652 }
5653}
5654EXPORT_SYMBOL(drm_format_num_planes);
5655
5656/**
5657 * drm_format_plane_cpp - determine the bytes per pixel value
5658 * @format: pixel format (DRM_FORMAT_*)
5659 * @plane: plane index
5660 *
5661 * Returns:
5662 * The bytes per pixel value for the specified plane.
5663 */
5664int drm_format_plane_cpp(uint32_t format, int plane)
5665{
5666 unsigned int depth;
5667 int bpp;
5668
5669 if (plane >= drm_format_num_planes(format))
5670 return 0;
5671
5672 switch (format) {
5673 case DRM_FORMAT_YUYV:
5674 case DRM_FORMAT_YVYU:
5675 case DRM_FORMAT_UYVY:
5676 case DRM_FORMAT_VYUY:
5677 return 2;
5678 case DRM_FORMAT_NV12:
5679 case DRM_FORMAT_NV21:
5680 case DRM_FORMAT_NV16:
5681 case DRM_FORMAT_NV61:
5682 case DRM_FORMAT_NV24:
5683 case DRM_FORMAT_NV42:
5684 return plane ? 2 : 1;
5685 case DRM_FORMAT_YUV410:
5686 case DRM_FORMAT_YVU410:
5687 case DRM_FORMAT_YUV411:
5688 case DRM_FORMAT_YVU411:
5689 case DRM_FORMAT_YUV420:
5690 case DRM_FORMAT_YVU420:
5691 case DRM_FORMAT_YUV422:
5692 case DRM_FORMAT_YVU422:
5693 case DRM_FORMAT_YUV444:
5694 case DRM_FORMAT_YVU444:
5695 return 1;
5696 default:
5697 drm_fb_get_bpp_depth(format, &depth, &bpp);
5698 return bpp >> 3;
5699 }
5700}
5701EXPORT_SYMBOL(drm_format_plane_cpp);
5702
5703/**
5704 * drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor
5705 * @format: pixel format (DRM_FORMAT_*)
5706 *
5707 * Returns:
5708 * The horizontal chroma subsampling factor for the
5709 * specified pixel format.
5710 */
5711int drm_format_horz_chroma_subsampling(uint32_t format)
5712{
5713 switch (format) {
5714 case DRM_FORMAT_YUV411:
5715 case DRM_FORMAT_YVU411:
5716 case DRM_FORMAT_YUV410:
5717 case DRM_FORMAT_YVU410:
5718 return 4;
5719 case DRM_FORMAT_YUYV:
5720 case DRM_FORMAT_YVYU:
5721 case DRM_FORMAT_UYVY:
5722 case DRM_FORMAT_VYUY:
5723 case DRM_FORMAT_NV12:
5724 case DRM_FORMAT_NV21:
5725 case DRM_FORMAT_NV16:
5726 case DRM_FORMAT_NV61:
5727 case DRM_FORMAT_YUV422:
5728 case DRM_FORMAT_YVU422:
5729 case DRM_FORMAT_YUV420:
5730 case DRM_FORMAT_YVU420:
5731 return 2;
5732 default:
5733 return 1;
5734 }
5735}
5736EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);
5737
5738/**
5739 * drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor
5740 * @format: pixel format (DRM_FORMAT_*)
5741 *
5742 * Returns:
5743 * The vertical chroma subsampling factor for the
5744 * specified pixel format.
5745 */
5746int drm_format_vert_chroma_subsampling(uint32_t format)
5747{
5748 switch (format) {
5749 case DRM_FORMAT_YUV410:
5750 case DRM_FORMAT_YVU410:
5751 return 4;
5752 case DRM_FORMAT_YUV420:
5753 case DRM_FORMAT_YVU420:
5754 case DRM_FORMAT_NV12:
5755 case DRM_FORMAT_NV21:
5756 return 2;
5757 default:
5758 return 1;
5759 }
5760}
5761EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
5762
5763/**
5764 * drm_format_plane_width - width of the plane given the first plane
5765 * @width: width of the first plane
5766 * @format: pixel format
5767 * @plane: plane index
5768 *
5769 * Returns:
5770 * The width of @plane, given that the width of the first plane is @width.
5771 */
5772int drm_format_plane_width(int width, uint32_t format, int plane)
5773{
5774 if (plane >= drm_format_num_planes(format))
5775 return 0;
5776
5777 if (plane == 0)
5778 return width;
5779
5780 return width / drm_format_horz_chroma_subsampling(format);
5781}
5782EXPORT_SYMBOL(drm_format_plane_width);
5783
5784/**
5785 * drm_format_plane_height - height of the plane given the first plane
5786 * @height: height of the first plane
5787 * @format: pixel format
5788 * @plane: plane index
5789 *
5790 * Returns:
5791 * The height of @plane, given that the height of the first plane is @height.
5792 */
5793int drm_format_plane_height(int height, uint32_t format, int plane)
5794{
5795 if (plane >= drm_format_num_planes(format))
5796 return 0;
5797
5798 if (plane == 0)
5799 return height;
5800
5801 return height / drm_format_vert_chroma_subsampling(format);
5802}
5803EXPORT_SYMBOL(drm_format_plane_height);
5804
5805/**
5806 * drm_rotation_simplify() - Try to simplify the rotation 5630 * drm_rotation_simplify() - Try to simplify the rotation
5807 * @rotation: Rotation to be simplified 5631 * @rotation: Rotation to be simplified
5808 * @supported_rotations: Supported rotations 5632 * @supported_rotations: Supported rotations
@@ -6065,3 +5889,48 @@ struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
6065 return tg; 5889 return tg;
6066} 5890}
6067EXPORT_SYMBOL(drm_mode_create_tile_group); 5891EXPORT_SYMBOL(drm_mode_create_tile_group);
5892
5893/**
5894 * drm_crtc_enable_color_mgmt - enable color management properties
5895 * @crtc: DRM CRTC
5896 * @degamma_lut_size: the size of the degamma lut (before CSC)
5897 * @has_ctm: whether to attach ctm_property for CSC matrix
5898 * @gamma_lut_size: the size of the gamma lut (after CSC)
5899 *
5900 * This function lets the driver enable the color correction
5901 * properties on a CRTC. This includes 3 degamma, csc and gamma
5902 * properties that userspace can set and 2 size properties to inform
5903 * the userspace of the lut sizes. Each of the properties are
5904 * optional. The gamma and degamma properties are only attached if
5905 * their size is not 0 and ctm_property is only attached if has_ctm is
5906 * true.
5907 */
5908void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
5909 uint degamma_lut_size,
5910 bool has_ctm,
5911 uint gamma_lut_size)
5912{
5913 struct drm_device *dev = crtc->dev;
5914 struct drm_mode_config *config = &dev->mode_config;
5915
5916 if (degamma_lut_size) {
5917 drm_object_attach_property(&crtc->base,
5918 config->degamma_lut_property, 0);
5919 drm_object_attach_property(&crtc->base,
5920 config->degamma_lut_size_property,
5921 degamma_lut_size);
5922 }
5923
5924 if (has_ctm)
5925 drm_object_attach_property(&crtc->base,
5926 config->ctm_property, 0);
5927
5928 if (gamma_lut_size) {
5929 drm_object_attach_property(&crtc->base,
5930 config->gamma_lut_property, 0);
5931 drm_object_attach_property(&crtc->base,
5932 config->gamma_lut_size_property,
5933 gamma_lut_size);
5934 }
5935}
5936EXPORT_SYMBOL(drm_crtc_enable_color_mgmt);
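
Typical driver usage, sketched with made-up LUT sizes; a driver would call this once per CRTC, usually right after drm_crtc_init(), enabling only the properties its hardware actually implements:

	/* CRTC with a 33-entry degamma LUT, a CSC matrix and a
	 * 257-entry gamma LUT: attach all five properties */
	drm_crtc_enable_color_mgmt(crtc, 33, true, 257);

	/* CRTC without degamma or CSC hardware: gamma LUT only */
	drm_crtc_enable_color_mgmt(crtc, 0, false, 256);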
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index a6e42433ef0e..604d3ef72ffa 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -232,6 +232,9 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev)
232 */ 232 */
233void drm_helper_disable_unused_functions(struct drm_device *dev) 233void drm_helper_disable_unused_functions(struct drm_device *dev)
234{ 234{
235 if (drm_core_check_feature(dev, DRIVER_ATOMIC))
236 DRM_ERROR("Called for atomic driver, this is not what you want.\n");
237
235 drm_modeset_lock_all(dev); 238 drm_modeset_lock_all(dev);
236 __drm_helper_disable_unused_functions(dev); 239 __drm_helper_disable_unused_functions(dev);
237 drm_modeset_unlock_all(dev); 240 drm_modeset_unlock_all(dev);
@@ -528,11 +531,11 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
528int drm_crtc_helper_set_config(struct drm_mode_set *set) 531int drm_crtc_helper_set_config(struct drm_mode_set *set)
529{ 532{
530 struct drm_device *dev; 533 struct drm_device *dev;
531 struct drm_crtc *new_crtc; 534 struct drm_crtc **save_encoder_crtcs, *new_crtc;
532 struct drm_encoder *save_encoders, *new_encoder, *encoder; 535 struct drm_encoder **save_connector_encoders, *new_encoder, *encoder;
533 bool mode_changed = false; /* if true do a full mode set */ 536 bool mode_changed = false; /* if true do a full mode set */
534 bool fb_changed = false; /* if true and !mode_changed just do a flip */ 537 bool fb_changed = false; /* if true and !mode_changed just do a flip */
535 struct drm_connector *save_connectors, *connector; 538 struct drm_connector *connector;
536 int count = 0, ro, fail = 0; 539 int count = 0, ro, fail = 0;
537 const struct drm_crtc_helper_funcs *crtc_funcs; 540 const struct drm_crtc_helper_funcs *crtc_funcs;
538 struct drm_mode_set save_set; 541 struct drm_mode_set save_set;
@@ -574,15 +577,15 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
574 * Allocate space for the backup of all (non-pointer) encoder and 577 * Allocate space for the backup of all (non-pointer) encoder and
575 * connector data. 578 * connector data.
576 */ 579 */
577 save_encoders = kzalloc(dev->mode_config.num_encoder * 580 save_encoder_crtcs = kzalloc(dev->mode_config.num_encoder *
578 sizeof(struct drm_encoder), GFP_KERNEL); 581 sizeof(struct drm_crtc *), GFP_KERNEL);
579 if (!save_encoders) 582 if (!save_encoder_crtcs)
580 return -ENOMEM; 583 return -ENOMEM;
581 584
582 save_connectors = kzalloc(dev->mode_config.num_connector * 585 save_connector_encoders = kzalloc(dev->mode_config.num_connector *
583 sizeof(struct drm_connector), GFP_KERNEL); 586 sizeof(struct drm_encoder *), GFP_KERNEL);
584 if (!save_connectors) { 587 if (!save_connector_encoders) {
585 kfree(save_encoders); 588 kfree(save_encoder_crtcs);
586 return -ENOMEM; 589 return -ENOMEM;
587 } 590 }
588 591
@@ -593,12 +596,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
593 */ 596 */
594 count = 0; 597 count = 0;
595 drm_for_each_encoder(encoder, dev) { 598 drm_for_each_encoder(encoder, dev) {
596 save_encoders[count++] = *encoder; 599 save_encoder_crtcs[count++] = encoder->crtc;
597 } 600 }
598 601
599 count = 0; 602 count = 0;
600 drm_for_each_connector(connector, dev) { 603 drm_for_each_connector(connector, dev) {
601 save_connectors[count++] = *connector; 604 save_connector_encoders[count++] = connector->encoder;
602 } 605 }
603 606
604 save_set.crtc = set->crtc; 607 save_set.crtc = set->crtc;
@@ -631,8 +634,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
631 mode_changed = true; 634 mode_changed = true;
632 } 635 }
633 636
634 /* take a reference on all connectors in set */ 637 /* take a reference on all unbound connectors in set, reuse the
638 * already taken reference for bound connectors
639 */
635 for (ro = 0; ro < set->num_connectors; ro++) { 640 for (ro = 0; ro < set->num_connectors; ro++) {
641 if (set->connectors[ro]->encoder)
642 continue;
636 drm_connector_reference(set->connectors[ro]); 643 drm_connector_reference(set->connectors[ro]);
637 } 644 }
638 645
@@ -754,30 +761,28 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
754 } 761 }
755 } 762 }
756 763
757 /* after fail drop reference on all connectors in save set */ 764 kfree(save_connector_encoders);
758 count = 0; 765 kfree(save_encoder_crtcs);
759 drm_for_each_connector(connector, dev) {
760 drm_connector_unreference(&save_connectors[count++]);
761 }
762
763 kfree(save_connectors);
764 kfree(save_encoders);
765 return 0; 766 return 0;
766 767
767fail: 768fail:
768 /* Restore all previous data. */ 769 /* Restore all previous data. */
769 count = 0; 770 count = 0;
770 drm_for_each_encoder(encoder, dev) { 771 drm_for_each_encoder(encoder, dev) {
771 *encoder = save_encoders[count++]; 772 encoder->crtc = save_encoder_crtcs[count++];
772 } 773 }
773 774
774 count = 0; 775 count = 0;
775 drm_for_each_connector(connector, dev) { 776 drm_for_each_connector(connector, dev) {
776 *connector = save_connectors[count++]; 777 connector->encoder = save_connector_encoders[count++];
777 } 778 }
778 779
779 /* after fail drop reference on all connectors in set */ 780 /* after fail drop reference on all unbound connectors in set, let
781 * bound connectors keep their reference
782 */
780 for (ro = 0; ro < set->num_connectors; ro++) { 783 for (ro = 0; ro < set->num_connectors; ro++) {
784 if (set->connectors[ro]->encoder)
785 continue;
781 drm_connector_unreference(set->connectors[ro]); 786 drm_connector_unreference(set->connectors[ro]);
782 } 787 }
783 788
@@ -787,8 +792,8 @@ fail:
787 save_set.y, save_set.fb)) 792 save_set.y, save_set.fb))
788 DRM_ERROR("failed to restore config after modeset failure\n"); 793 DRM_ERROR("failed to restore config after modeset failure\n");
789 794
790 kfree(save_connectors); 795 kfree(save_connector_encoders);
791 kfree(save_encoders); 796 kfree(save_encoder_crtcs);
792 return ret; 797 return ret;
793} 798}
794EXPORT_SYMBOL(drm_crtc_helper_set_config); 799EXPORT_SYMBOL(drm_crtc_helper_set_config);
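
The backup pattern above in miniature (illustration, using kcalloc() as the allocation idiom rather than the kzalloc() form in the patch): save only the routing pointers, never whole objects, so restoring on failure cannot clobber unrelated fields or corrupt reference counts.

	struct drm_crtc **saved_crtcs;
	unsigned int n = 0;

	saved_crtcs = kcalloc(dev->mode_config.num_encoder,
			      sizeof(*saved_crtcs), GFP_KERNEL);
	if (!saved_crtcs)
		return -ENOMEM;

	drm_for_each_encoder(encoder, dev)
		saved_crtcs[n++] = encoder->crtc;	/* the pointer, not *encoder */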
@@ -1121,36 +1126,3 @@ int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
1121 return drm_plane_helper_commit(plane, plane_state, old_fb); 1126 return drm_plane_helper_commit(plane, plane_state, old_fb);
1122} 1127}
1123EXPORT_SYMBOL(drm_helper_crtc_mode_set_base); 1128EXPORT_SYMBOL(drm_helper_crtc_mode_set_base);
1124
1125/**
1126 * drm_helper_crtc_enable_color_mgmt - enable color management properties
1127 * @crtc: DRM CRTC
1128 * @degamma_lut_size: the size of the degamma lut (before CSC)
1129 * @gamma_lut_size: the size of the gamma lut (after CSC)
1130 *
1131 * This function lets the driver enable the color correction properties on a
1132 * CRTC. This includes 3 degamma, csc and gamma properties that userspace can
1133 * set and 2 size properties to inform the userspace of the lut sizes.
1134 */
1135void drm_helper_crtc_enable_color_mgmt(struct drm_crtc *crtc,
1136 int degamma_lut_size,
1137 int gamma_lut_size)
1138{
1139 struct drm_device *dev = crtc->dev;
1140 struct drm_mode_config *config = &dev->mode_config;
1141
1142 drm_object_attach_property(&crtc->base,
1143 config->degamma_lut_property, 0);
1144 drm_object_attach_property(&crtc->base,
1145 config->ctm_property, 0);
1146 drm_object_attach_property(&crtc->base,
1147 config->gamma_lut_property, 0);
1148
1149 drm_object_attach_property(&crtc->base,
1150 config->degamma_lut_size_property,
1151 degamma_lut_size);
1152 drm_object_attach_property(&crtc->base,
1153 config->gamma_lut_size_property,
1154 gamma_lut_size);
1155}
1156EXPORT_SYMBOL(drm_helper_crtc_enable_color_mgmt);
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index a78c138282ea..47a500b90fd7 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -31,14 +31,100 @@
31 * and are not exported to drivers. 31 * and are not exported to drivers.
32 */ 32 */
33 33
34
35/* drm_crtc.c */
36void drm_connector_ida_init(void);
37void drm_connector_ida_destroy(void);
34int drm_mode_object_get(struct drm_device *dev, 38int drm_mode_object_get(struct drm_device *dev,
35 struct drm_mode_object *obj, uint32_t obj_type); 39 struct drm_mode_object *obj, uint32_t obj_type);
36void drm_mode_object_unregister(struct drm_device *dev, 40void drm_mode_object_unregister(struct drm_device *dev,
37 struct drm_mode_object *object); 41 struct drm_mode_object *object);
42bool drm_property_change_valid_get(struct drm_property *property,
43 uint64_t value,
44 struct drm_mode_object **ref);
45void drm_property_change_valid_put(struct drm_property *property,
46 struct drm_mode_object *ref);
47
48int drm_plane_check_pixel_format(const struct drm_plane *plane,
49 u32 format);
50int drm_crtc_check_viewport(const struct drm_crtc *crtc,
51 int x, int y,
52 const struct drm_display_mode *mode,
53 const struct drm_framebuffer *fb);
54
55void drm_fb_release(struct drm_file *file_priv);
56void drm_property_destroy_user_blobs(struct drm_device *dev,
57 struct drm_file *file_priv);
58
59/* dumb buffer support IOCTLs */
60int drm_mode_create_dumb_ioctl(struct drm_device *dev,
61 void *data, struct drm_file *file_priv);
62int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
63 void *data, struct drm_file *file_priv);
64int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
65 void *data, struct drm_file *file_priv);
66
67/* framebuffer IOCTLs */
68extern int drm_mode_addfb(struct drm_device *dev,
69 void *data, struct drm_file *file_priv);
70extern int drm_mode_addfb2(struct drm_device *dev,
71 void *data, struct drm_file *file_priv);
72int drm_mode_rmfb(struct drm_device *dev,
73 void *data, struct drm_file *file_priv);
74int drm_mode_getfb(struct drm_device *dev,
75 void *data, struct drm_file *file_priv);
76int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
77 void *data, struct drm_file *file_priv);
78
79/* IOCTLs */
80int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
81 struct drm_file *file_priv);
82int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
83 struct drm_file *file_priv);
84
85int drm_mode_getresources(struct drm_device *dev,
86 void *data, struct drm_file *file_priv);
87int drm_mode_getplane_res(struct drm_device *dev, void *data,
88 struct drm_file *file_priv);
89int drm_mode_getcrtc(struct drm_device *dev,
90 void *data, struct drm_file *file_priv);
91int drm_mode_getconnector(struct drm_device *dev,
92 void *data, struct drm_file *file_priv);
93int drm_mode_setcrtc(struct drm_device *dev,
94 void *data, struct drm_file *file_priv);
95int drm_mode_getplane(struct drm_device *dev,
96 void *data, struct drm_file *file_priv);
97int drm_mode_setplane(struct drm_device *dev,
98 void *data, struct drm_file *file_priv);
99int drm_mode_cursor_ioctl(struct drm_device *dev,
100 void *data, struct drm_file *file_priv);
101int drm_mode_cursor2_ioctl(struct drm_device *dev,
102 void *data, struct drm_file *file_priv);
103int drm_mode_getproperty_ioctl(struct drm_device *dev,
104 void *data, struct drm_file *file_priv);
105int drm_mode_getblob_ioctl(struct drm_device *dev,
106 void *data, struct drm_file *file_priv);
107int drm_mode_createblob_ioctl(struct drm_device *dev,
108 void *data, struct drm_file *file_priv);
109int drm_mode_destroyblob_ioctl(struct drm_device *dev,
110 void *data, struct drm_file *file_priv);
111int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
112 void *data, struct drm_file *file_priv);
113int drm_mode_getencoder(struct drm_device *dev,
114 void *data, struct drm_file *file_priv);
115int drm_mode_gamma_get_ioctl(struct drm_device *dev,
116 void *data, struct drm_file *file_priv);
117int drm_mode_gamma_set_ioctl(struct drm_device *dev,
118 void *data, struct drm_file *file_priv);
119
120int drm_mode_page_flip_ioctl(struct drm_device *dev,
121 void *data, struct drm_file *file_priv);
38 122
39/* drm_atomic.c */ 123/* drm_atomic.c */
40int drm_atomic_get_property(struct drm_mode_object *obj, 124int drm_atomic_get_property(struct drm_mode_object *obj,
41 struct drm_property *property, uint64_t *val); 125 struct drm_property *property, uint64_t *val);
42int drm_mode_atomic_ioctl(struct drm_device *dev, 126int drm_mode_atomic_ioctl(struct drm_device *dev,
43 void *data, struct drm_file *file_priv); 127 void *data, struct drm_file *file_priv);
44 128
129int drm_modeset_register_all(struct drm_device *dev);
130void drm_modeset_unregister_all(struct drm_device *dev);
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 3bcf8e6a85b3..fa10cef2ba37 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -46,11 +46,8 @@
46 46
47static const struct drm_info_list drm_debugfs_list[] = { 47static const struct drm_info_list drm_debugfs_list[] = {
48 {"name", drm_name_info, 0}, 48 {"name", drm_name_info, 0},
49 {"vm", drm_vm_info, 0},
50 {"clients", drm_clients_info, 0}, 49 {"clients", drm_clients_info, 0},
51 {"bufs", drm_bufs_info, 0},
52 {"gem_names", drm_gem_name_info, DRIVER_GEM}, 50 {"gem_names", drm_gem_name_info, DRIVER_GEM},
53 {"vma", drm_vma_info, 0},
54}; 51};
55#define DRM_DEBUGFS_ENTRIES ARRAY_SIZE(drm_debugfs_list) 52#define DRM_DEBUGFS_ENTRIES ARRAY_SIZE(drm_debugfs_list)
56 53
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index eeaf5a7c3aa7..091053e995e5 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -708,8 +708,6 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
708 708
709 memset(&msg, 0, sizeof(msg)); 709 memset(&msg, 0, sizeof(msg));
710 710
711 mutex_lock(&aux->hw_mutex);
712
713 for (i = 0; i < num; i++) { 711 for (i = 0; i < num; i++) {
714 msg.address = msgs[i].addr; 712 msg.address = msgs[i].addr;
715 drm_dp_i2c_msg_set_request(&msg, &msgs[i]); 713 drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
@@ -764,8 +762,6 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
764 msg.size = 0; 762 msg.size = 0;
765 (void)drm_dp_i2c_do_msg(aux, &msg); 763 (void)drm_dp_i2c_do_msg(aux, &msg);
766 764
767 mutex_unlock(&aux->hw_mutex);
768
769 return err; 765 return err;
770} 766}
771 767
@@ -774,22 +770,64 @@ static const struct i2c_algorithm drm_dp_i2c_algo = {
774 .master_xfer = drm_dp_i2c_xfer, 770 .master_xfer = drm_dp_i2c_xfer,
775}; 771};
776 772
773static struct drm_dp_aux *i2c_to_aux(struct i2c_adapter *i2c)
774{
775 return container_of(i2c, struct drm_dp_aux, ddc);
776}
777
778static void lock_bus(struct i2c_adapter *i2c, unsigned int flags)
779{
780 mutex_lock(&i2c_to_aux(i2c)->hw_mutex);
781}
782
783static int trylock_bus(struct i2c_adapter *i2c, unsigned int flags)
784{
785 return mutex_trylock(&i2c_to_aux(i2c)->hw_mutex);
786}
787
788static void unlock_bus(struct i2c_adapter *i2c, unsigned int flags)
789{
790 mutex_unlock(&i2c_to_aux(i2c)->hw_mutex);
791}
792
777/** 793/**
778 * drm_dp_aux_register() - initialise and register aux channel 794 * drm_dp_aux_init() - minimally initialise an aux channel
779 * @aux: DisplayPort AUX channel 795 * @aux: DisplayPort AUX channel
780 * 796 *
781 * Returns 0 on success or a negative error code on failure. 797 * If you need to use the drm_dp_aux's i2c adapter prior to registering it
798 * with the outside world, call drm_dp_aux_init() first. You must still
799 * call drm_dp_aux_register() once the connector has been registered to
800 * allow userspace access to the auxiliary DP channel.
782 */ 801 */
783int drm_dp_aux_register(struct drm_dp_aux *aux) 802void drm_dp_aux_init(struct drm_dp_aux *aux)
784{ 803{
785 int ret;
786
787 mutex_init(&aux->hw_mutex); 804 mutex_init(&aux->hw_mutex);
788 805
789 aux->ddc.algo = &drm_dp_i2c_algo; 806 aux->ddc.algo = &drm_dp_i2c_algo;
790 aux->ddc.algo_data = aux; 807 aux->ddc.algo_data = aux;
791 aux->ddc.retries = 3; 808 aux->ddc.retries = 3;
792 809
810 aux->ddc.lock_bus = lock_bus;
811 aux->ddc.trylock_bus = trylock_bus;
812 aux->ddc.unlock_bus = unlock_bus;
813}
814EXPORT_SYMBOL(drm_dp_aux_init);
815
816/**
817 * drm_dp_aux_register() - initialise and register aux channel
818 * @aux: DisplayPort AUX channel
819 *
820 * Automatically calls drm_dp_aux_init() if this hasn't been done yet.
821 *
822 * Returns 0 on success or a negative error code on failure.
823 */
824int drm_dp_aux_register(struct drm_dp_aux *aux)
825{
826 int ret;
827
828 if (!aux->ddc.algo)
829 drm_dp_aux_init(aux);
830
793 aux->ddc.class = I2C_CLASS_DDC; 831 aux->ddc.class = I2C_CLASS_DDC;
794 aux->ddc.owner = THIS_MODULE; 832 aux->ddc.owner = THIS_MODULE;
795 aux->ddc.dev.parent = aux->dev; 833 aux->ddc.dev.parent = aux->dev;
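A short sketch of the init/register split introduced here, assuming a hypothetical driver with its own AUX transfer hook; only drm_dp_aux_init() and drm_dp_aux_register() are from this patch:

    aux->dev = &pdev->dev;               /* parent device, driver-specific */
    aux->transfer = my_aux_transfer;     /* hypothetical AUX transfer hook */
    drm_dp_aux_init(aux);                /* DDC/i2c usable from here on */

    /* ... later, once the connector has been registered: */
    ret = drm_dp_aux_register(aux);      /* exposes the channel to userspace */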
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index a13edf5de2d6..6537908050d7 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -2927,11 +2927,9 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
2927 drm_dp_port_teardown_pdt(port, port->pdt); 2927 drm_dp_port_teardown_pdt(port, port->pdt);
2928 2928
2929 if (!port->input && port->vcpi.vcpi > 0) { 2929 if (!port->input && port->vcpi.vcpi > 0) {
2930 if (mgr->mst_state) { 2930 drm_dp_mst_reset_vcpi_slots(mgr, port);
2931 drm_dp_mst_reset_vcpi_slots(mgr, port); 2931 drm_dp_update_payload_part1(mgr);
2932 drm_dp_update_payload_part1(mgr); 2932 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
2933 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
2934 }
2935 } 2933 }
2936 2934
2937 kref_put(&port->kref, drm_dp_free_mst_port); 2935 kref_put(&port->kref, drm_dp_free_mst_port);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index bff89226a344..aead9ffcbe29 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -34,8 +34,10 @@
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <drm/drmP.h> 35#include <drm/drmP.h>
36#include <drm/drm_core.h> 36#include <drm/drm_core.h>
37#include "drm_crtc_internal.h"
37#include "drm_legacy.h" 38#include "drm_legacy.h"
38#include "drm_internal.h" 39#include "drm_internal.h"
40#include "drm_crtc_internal.h"
39 41
40/* 42/*
41 * drm_debug: Enable debug output. 43 * drm_debug: Enable debug output.
@@ -93,114 +95,6 @@ void drm_ut_debug_printk(const char *function_name, const char *format, ...)
93} 95}
94EXPORT_SYMBOL(drm_ut_debug_printk); 96EXPORT_SYMBOL(drm_ut_debug_printk);
95 97
96struct drm_master *drm_master_create(struct drm_minor *minor)
97{
98 struct drm_master *master;
99
100 master = kzalloc(sizeof(*master), GFP_KERNEL);
101 if (!master)
102 return NULL;
103
104 kref_init(&master->refcount);
105 spin_lock_init(&master->lock.spinlock);
106 init_waitqueue_head(&master->lock.lock_queue);
107 idr_init(&master->magic_map);
108 master->minor = minor;
109
110 return master;
111}
112
113struct drm_master *drm_master_get(struct drm_master *master)
114{
115 kref_get(&master->refcount);
116 return master;
117}
118EXPORT_SYMBOL(drm_master_get);
119
120static void drm_master_destroy(struct kref *kref)
121{
122 struct drm_master *master = container_of(kref, struct drm_master, refcount);
123 struct drm_device *dev = master->minor->dev;
124
125 if (dev->driver->master_destroy)
126 dev->driver->master_destroy(dev, master);
127
128 drm_legacy_master_rmmaps(dev, master);
129
130 idr_destroy(&master->magic_map);
131 kfree(master->unique);
132 kfree(master);
133}
134
135void drm_master_put(struct drm_master **master)
136{
137 kref_put(&(*master)->refcount, drm_master_destroy);
138 *master = NULL;
139}
140EXPORT_SYMBOL(drm_master_put);
141
142int drm_setmaster_ioctl(struct drm_device *dev, void *data,
143 struct drm_file *file_priv)
144{
145 int ret = 0;
146
147 mutex_lock(&dev->master_mutex);
148 if (file_priv->is_master)
149 goto out_unlock;
150
151 if (file_priv->minor->master) {
152 ret = -EINVAL;
153 goto out_unlock;
154 }
155
156 if (!file_priv->master) {
157 ret = -EINVAL;
158 goto out_unlock;
159 }
160
161 if (!file_priv->allowed_master) {
162 ret = drm_new_set_master(dev, file_priv);
163 goto out_unlock;
164 }
165
166 file_priv->minor->master = drm_master_get(file_priv->master);
167 file_priv->is_master = 1;
168 if (dev->driver->master_set) {
169 ret = dev->driver->master_set(dev, file_priv, false);
170 if (unlikely(ret != 0)) {
171 file_priv->is_master = 0;
172 drm_master_put(&file_priv->minor->master);
173 }
174 }
175
176out_unlock:
177 mutex_unlock(&dev->master_mutex);
178 return ret;
179}
180
181int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
182 struct drm_file *file_priv)
183{
184 int ret = -EINVAL;
185
186 mutex_lock(&dev->master_mutex);
187 if (!file_priv->is_master)
188 goto out_unlock;
189
190 if (!file_priv->minor->master)
191 goto out_unlock;
192
193 ret = 0;
194 if (dev->driver->master_drop)
195 dev->driver->master_drop(dev, file_priv, false);
196 drm_master_put(&file_priv->minor->master);
197 file_priv->is_master = 0;
198
199out_unlock:
200 mutex_unlock(&dev->master_mutex);
201 return ret;
202}
203
204/* 98/*
205 * DRM Minors 99 * DRM Minors
206 * A DRM device can provide several char-dev interfaces on the DRM-Major. Each 100 * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
@@ -405,10 +299,9 @@ void drm_minor_release(struct drm_minor *minor)
405 * callbacks implemented by the driver. The driver then needs to initialize all 299 * callbacks implemented by the driver. The driver then needs to initialize all
406 * the various subsystems for the drm device like memory management, vblank 300 * the various subsystems for the drm device like memory management, vblank
407 handling, modesetting support and initial output configuration plus obviously 301 handling, modesetting support and initial output configuration plus obviously
408 * initialize all the corresponding hardware bits. An important part of this is 302 * initialize all the corresponding hardware bits. Finally when everything is up
409 * also calling drm_dev_set_unique() to set the userspace-visible unique name of 303 * and running and ready for userspace the device instance can be published
410 * this device instance. Finally when everything is up and running and ready for 304 * using drm_dev_register().
411 * userspace the device instance can be published using drm_dev_register().
412 * 305 *
413 * There is also deprecated support for initializing device instances using 306 * There is also deprecated support for initializing device instances using
414 * bus-specific helpers and the ->load() callback. But due to 307 * bus-specific helpers and the ->load() callback. But due to
@@ -430,6 +323,14 @@ void drm_minor_release(struct drm_minor *minor)
430 * dev_priv field of &drm_device. 323 * dev_priv field of &drm_device.
431 */ 324 */
432 325
326static int drm_dev_set_unique(struct drm_device *dev, const char *name)
327{
328 kfree(dev->unique);
329 dev->unique = kstrdup(name, GFP_KERNEL);
330
331 return dev->unique ? 0 : -ENOMEM;
332}
333
433/** 334/**
434 * drm_put_dev - Unregister and release a DRM device 335 * drm_put_dev - Unregister and release a DRM device
435 * @dev: DRM device 336 * @dev: DRM device
@@ -549,11 +450,12 @@ static void drm_fs_inode_free(struct inode *inode)
549} 450}
550 451
551/** 452/**
552 * drm_dev_alloc - Allocate new DRM device 453 * drm_dev_init - Initialise new DRM device
553 * @driver: DRM driver to allocate device for 454 * @dev: DRM device
455 * @driver: DRM driver
554 * @parent: Parent device object 456 * @parent: Parent device object
555 * 457 *
556 * Allocate and initialize a new DRM device. No device registration is done. 458 * Initialize a new DRM device. No device registration is done.
557 * Call drm_dev_register() to advertise the device to user space and register it 459 * Call drm_dev_register() to advertise the device to user space and register it
558 * with other core subsystems. This should be done last in the device 460 * with other core subsystems. This should be done last in the device
559 * initialization sequence to make sure userspace can't access an inconsistent 461 * initialization sequence to make sure userspace can't access an inconsistent
@@ -564,19 +466,18 @@ static void drm_fs_inode_free(struct inode *inode)
564 * 466 *
565 * Note that for purely virtual devices @parent can be NULL. 467 * Note that for purely virtual devices @parent can be NULL.
566 * 468 *
469 * Drivers that do not want to allocate their own device struct
470 * embedding struct &drm_device can call drm_dev_alloc() instead.
471 *
567 * RETURNS: 472 * RETURNS:
568 * Pointer to new DRM device, or NULL if out of memory. 473 * 0 on success, or error code on failure.
569 */ 474 */
570struct drm_device *drm_dev_alloc(struct drm_driver *driver, 475int drm_dev_init(struct drm_device *dev,
571 struct device *parent) 476 struct drm_driver *driver,
477 struct device *parent)
572{ 478{
573 struct drm_device *dev;
574 int ret; 479 int ret;
575 480
576 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
577 if (!dev)
578 return NULL;
579
580 kref_init(&dev->ref); 481 kref_init(&dev->ref);
581 dev->dev = parent; 482 dev->dev = parent;
582 dev->driver = driver; 483 dev->driver = driver;
@@ -605,8 +506,6 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
605 ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL); 506 ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
606 if (ret) 507 if (ret)
607 goto err_minors; 508 goto err_minors;
608
609 WARN_ON(driver->suspend || driver->resume);
610 } 509 }
611 510
612 if (drm_core_check_feature(dev, DRIVER_RENDER)) { 511 if (drm_core_check_feature(dev, DRIVER_RENDER)) {
@@ -619,7 +518,8 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
619 if (ret) 518 if (ret)
620 goto err_minors; 519 goto err_minors;
621 520
622 if (drm_ht_create(&dev->map_hash, 12)) 521 ret = drm_ht_create(&dev->map_hash, 12);
522 if (ret)
623 goto err_minors; 523 goto err_minors;
624 524
625 drm_legacy_ctxbitmap_init(dev); 525 drm_legacy_ctxbitmap_init(dev);
@@ -632,13 +532,13 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
632 } 532 }
633 } 533 }
634 534
635 if (parent) { 535 /* Use the parent device name as DRM device unique identifier, but fall
636 ret = drm_dev_set_unique(dev, dev_name(parent)); 536 * back to the driver name for virtual devices like vgem. */
637 if (ret) 537 ret = drm_dev_set_unique(dev, parent ? dev_name(parent) : driver->name);
638 goto err_setunique; 538 if (ret)
639 } 539 goto err_setunique;
640 540
641 return dev; 541 return 0;
642 542
643err_setunique: 543err_setunique:
644 if (drm_core_check_feature(dev, DRIVER_GEM)) 544 if (drm_core_check_feature(dev, DRIVER_GEM))
@@ -653,8 +553,49 @@ err_minors:
653 drm_fs_inode_free(dev->anon_inode); 553 drm_fs_inode_free(dev->anon_inode);
654err_free: 554err_free:
655 mutex_destroy(&dev->master_mutex); 555 mutex_destroy(&dev->master_mutex);
656 kfree(dev); 556 return ret;
657 return NULL; 557}
558EXPORT_SYMBOL(drm_dev_init);
559
560/**
561 * drm_dev_alloc - Allocate new DRM device
562 * @driver: DRM driver to allocate device for
563 * @parent: Parent device object
564 *
565 * Allocate and initialize a new DRM device. No device registration is done.
566 * Call drm_dev_register() to advertise the device to user space and register it
567 * with other core subsystems. This should be done last in the device
568 * initialization sequence to make sure userspace can't access an inconsistent
569 * state.
570 *
571 * The initial ref-count of the object is 1. Use drm_dev_ref() and
572 * drm_dev_unref() to take and drop further ref-counts.
573 *
574 * Note that for purely virtual devices @parent can be NULL.
575 *
576 * Drivers that wish to subclass or embed struct &drm_device into their
577 * own struct should look at using drm_dev_init() instead.
578 *
579 * RETURNS:
580 * Pointer to new DRM device, or NULL if out of memory.
581 */
582struct drm_device *drm_dev_alloc(struct drm_driver *driver,
583 struct device *parent)
584{
585 struct drm_device *dev;
586 int ret;
587
588 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
589 if (!dev)
590 return NULL;
591
592 ret = drm_dev_init(dev, driver, parent);
593 if (ret) {
594 kfree(dev);
595 return NULL;
596 }
597
598 return dev;
658} 599}
659EXPORT_SYMBOL(drm_dev_alloc); 600EXPORT_SYMBOL(drm_dev_alloc);
660 601
@@ -718,11 +659,7 @@ EXPORT_SYMBOL(drm_dev_unref);
718 * 659 *
719 * Register the DRM device @dev with the system, advertise device to user-space 660 * Register the DRM device @dev with the system, advertise device to user-space
720 * and start normal device operation. @dev must be allocated via drm_dev_alloc() 661 * and start normal device operation. @dev must be allocated via drm_dev_alloc()
721 * previously. Right after drm_dev_register() the driver should call 662 * previously.
722 * drm_connector_register_all() to register all connectors in sysfs. This is
723 * a separate call for backward compatibility with drivers still using
724 * the deprecated ->load() callback, where connectors are registered from within
725 * the ->load() callback.
726 * 663 *
727 * Never call this twice on any device! 664 * Never call this twice on any device!
728 * 665 *
@@ -759,6 +696,9 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
759 goto err_minors; 696 goto err_minors;
760 } 697 }
761 698
699 if (drm_core_check_feature(dev, DRIVER_MODESET))
700 drm_modeset_register_all(dev);
701
762 ret = 0; 702 ret = 0;
763 goto out_unlock; 703 goto out_unlock;
764 704
@@ -789,6 +729,9 @@ void drm_dev_unregister(struct drm_device *dev)
789 729
790 drm_lastclose(dev); 730 drm_lastclose(dev);
791 731
732 if (drm_core_check_feature(dev, DRIVER_MODESET))
733 drm_modeset_unregister_all(dev);
734
792 if (dev->driver->unload) 735 if (dev->driver->unload)
793 dev->driver->unload(dev); 736 dev->driver->unload(dev);
794 737
@@ -806,26 +749,6 @@ void drm_dev_unregister(struct drm_device *dev)
806} 749}
807EXPORT_SYMBOL(drm_dev_unregister); 750EXPORT_SYMBOL(drm_dev_unregister);
808 751
809/**
810 * drm_dev_set_unique - Set the unique name of a DRM device
811 * @dev: device of which to set the unique name
812 * @name: unique name
813 *
814 * Sets the unique name of a DRM device using the specified string. Drivers
815 * can use this at driver probe time if the unique name of the devices they
816 * drive is static.
817 *
818 * Return: 0 on success or a negative error code on failure.
819 */
820int drm_dev_set_unique(struct drm_device *dev, const char *name)
821{
822 kfree(dev->unique);
823 dev->unique = kstrdup(name, GFP_KERNEL);
824
825 return dev->unique ? 0 : -ENOMEM;
826}
827EXPORT_SYMBOL(drm_dev_set_unique);
828
829/* 752/*
830 * DRM Core 753 * DRM Core
831 * The DRM core module initializes all global DRM objects and makes them 754 * The DRM core module initializes all global DRM objects and makes them
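A sketch of the subclassing pattern drm_dev_init() enables, with hypothetical my_device/my_driver names; teardown and error unwinding are elided for brevity:

    #include <drm/drmP.h>

    struct my_device {
            struct drm_device drm;      /* embedded, initialised in place */
            void __iomem *mmio;         /* hypothetical driver state */
    };

    static int my_probe(struct platform_device *pdev)
    {
            struct my_device *mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
            int ret;

            if (!mdev)
                    return -ENOMEM;

            ret = drm_dev_init(&mdev->drm, &my_driver, &pdev->dev);
            if (ret) {
                    kfree(mdev);
                    return ret;
            }

            /* ... map registers, set up modeset ... */
            return drm_dev_register(&mdev->drm, 0);
    }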
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 9a401aed98e0..622f788bff46 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -271,7 +271,7 @@ int drm_load_edid_firmware(struct drm_connector *connector)
271 * by commas, search through the list looking for one that 271 * by commas, search through the list looking for one that
272 * matches the connector. 272 * matches the connector.
273 * 273 *
274 * If there's one or more that don't't specify a connector, keep 274 * If there's one or more that doesn't specify a connector, keep
275 * the last one found as a fallback. 275 * the last one found as a fallback.
276 */ 276 */
277 fwstr = kstrdup(edid_firmware, GFP_KERNEL); 277 fwstr = kstrdup(edid_firmware, GFP_KERNEL);
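For reference, the list being parsed here comes from the edid_firmware module parameter; an illustrative kernel command line value (the parameter spelling and connector name are assumptions, not from this patch) would be:

    drm_kms_helper.edid_firmware=DVI-I-1:edid/1920x1080.bin,edid/1280x1024.bin

The DVI-I-1 entry applies only to that connector, while the unprefixed entry is the fallback kept by the logic above.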
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 172cafe11c71..1fd6eac1400c 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -23,6 +23,7 @@
23#include <drm/drm_crtc_helper.h> 23#include <drm/drm_crtc_helper.h>
24#include <drm/drm_gem_cma_helper.h> 24#include <drm/drm_gem_cma_helper.h>
25#include <drm/drm_fb_cma_helper.h> 25#include <drm/drm_fb_cma_helper.h>
26#include <linux/dma-mapping.h>
26#include <linux/module.h> 27#include <linux/module.h>
27 28
28#define DEFAULT_FBDEFIO_DELAY_MS 50 29#define DEFAULT_FBDEFIO_DELAY_MS 50
@@ -52,7 +53,7 @@ struct drm_fbdev_cma {
52 * will be set up automatically. dirty() is called by 53 * will be set up automatically. dirty() is called by
53 * drm_fb_helper_deferred_io() in process context (struct delayed_work). 54 * drm_fb_helper_deferred_io() in process context (struct delayed_work).
54 * 55 *
55 * Example fbdev deferred io code: 56 * Example fbdev deferred io code::
56 * 57 *
57 * static int driver_fbdev_fb_dirty(struct drm_framebuffer *fb, 58 * static int driver_fbdev_fb_dirty(struct drm_framebuffer *fb,
58 * struct drm_file *file_priv, 59 * struct drm_file *file_priv,
@@ -162,6 +163,10 @@ static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
162 * drm_fb_cma_create_with_funcs() - helper function for the 163 * drm_fb_cma_create_with_funcs() - helper function for the
163 * &drm_mode_config_funcs ->fb_create 164 * &drm_mode_config_funcs ->fb_create
164 * callback function 165 * callback function
166 * @dev: DRM device
167 * @file_priv: drm file for the ioctl call
168 * @mode_cmd: metadata from the userspace fb creation request
169 * @funcs: vtable to be used for the new framebuffer object
165 * 170 *
166 * This can be used to set &drm_framebuffer_funcs for drivers that need the 171 * This can be used to set &drm_framebuffer_funcs for drivers that need the
167 * dirty() callback. Use drm_fb_cma_create() if you don't need to change 172 * dirty() callback. Use drm_fb_cma_create() if you don't need to change
@@ -223,6 +228,9 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_create_with_funcs);
223 228
224/** 229/**
225 * drm_fb_cma_create() - &drm_mode_config_funcs ->fb_create callback function 230 * drm_fb_cma_create() - &drm_mode_config_funcs ->fb_create callback function
231 * @dev: DRM device
232 * @file_priv: drm file for the ioctl call
233 * @mode_cmd: metadata from the userspace fb creation request
226 * 234 *
227 * If your hardware has special alignment or pitch requirements these should be 235 * If your hardware has special alignment or pitch requirements these should be
228 * checked before calling this function. Use drm_fb_cma_create_with_funcs() if 236 * checked before calling this function. Use drm_fb_cma_create_with_funcs() if
@@ -246,7 +254,7 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_create);
246 * This function will usually be called from the CRTC callback functions. 254 * This function will usually be called from the CRTC callback functions.
247 */ 255 */
248struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb, 256struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
249 unsigned int plane) 257 unsigned int plane)
250{ 258{
251 struct drm_fb_cma *fb_cma = to_fb_cma(fb); 259 struct drm_fb_cma *fb_cma = to_fb_cma(fb);
252 260
@@ -258,10 +266,6 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
258EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj); 266EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
259 267
260#ifdef CONFIG_DEBUG_FS 268#ifdef CONFIG_DEBUG_FS
261/*
262 * drm_fb_cma_describe() - Helper to dump information about a single
263 * CMA framebuffer object
264 */
265static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m) 269static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
266{ 270{
267 struct drm_fb_cma *fb_cma = to_fb_cma(fb); 271 struct drm_fb_cma *fb_cma = to_fb_cma(fb);
@@ -279,7 +283,9 @@ static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
279 283
280/** 284/**
281 * drm_fb_cma_debugfs_show() - Helper to list CMA framebuffer objects 285 * drm_fb_cma_debugfs_show() - Helper to list CMA framebuffer objects
282 * in debugfs. 286 * in debugfs.
287 * @m: output file
288 * @arg: private data for the callback
283 */ 289 */
284int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg) 290int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg)
285{ 291{
@@ -297,6 +303,12 @@ int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg)
297EXPORT_SYMBOL_GPL(drm_fb_cma_debugfs_show); 303EXPORT_SYMBOL_GPL(drm_fb_cma_debugfs_show);
298#endif 304#endif
299 305
306static int drm_fb_cma_mmap(struct fb_info *info, struct vm_area_struct *vma)
307{
308 return dma_mmap_writecombine(info->device, vma, info->screen_base,
309 info->fix.smem_start, info->fix.smem_len);
310}
311
300static struct fb_ops drm_fbdev_cma_ops = { 312static struct fb_ops drm_fbdev_cma_ops = {
301 .owner = THIS_MODULE, 313 .owner = THIS_MODULE,
302 .fb_fillrect = drm_fb_helper_sys_fillrect, 314 .fb_fillrect = drm_fb_helper_sys_fillrect,
@@ -307,6 +319,7 @@ static struct fb_ops drm_fbdev_cma_ops = {
307 .fb_blank = drm_fb_helper_blank, 319 .fb_blank = drm_fb_helper_blank,
308 .fb_pan_display = drm_fb_helper_pan_display, 320 .fb_pan_display = drm_fb_helper_pan_display,
309 .fb_setcmap = drm_fb_helper_setcmap, 321 .fb_setcmap = drm_fb_helper_setcmap,
322 .fb_mmap = drm_fb_cma_mmap,
310}; 323};
311 324
312static int drm_fbdev_cma_deferred_io_mmap(struct fb_info *info, 325static int drm_fbdev_cma_deferred_io_mmap(struct fb_info *info,
@@ -333,6 +346,7 @@ static int drm_fbdev_cma_defio_init(struct fb_info *fbi,
333 fbops = kzalloc(sizeof(*fbops), GFP_KERNEL); 346 fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
334 if (!fbdefio || !fbops) { 347 if (!fbdefio || !fbops) {
335 kfree(fbdefio); 348 kfree(fbdefio);
349 kfree(fbops);
336 return -ENOMEM; 350 return -ENOMEM;
337 } 351 }
338 352
@@ -445,7 +459,7 @@ err_cma_destroy:
445err_fb_info_destroy: 459err_fb_info_destroy:
446 drm_fb_helper_release_fbi(helper); 460 drm_fb_helper_release_fbi(helper);
447err_gem_free_object: 461err_gem_free_object:
448 dev->driver->gem_free_object(&obj->base); 462 drm_gem_object_unreference_unlocked(&obj->base);
449 return ret; 463 return ret;
450} 464}
451EXPORT_SYMBOL(drm_fbdev_cma_create_with_funcs); 465EXPORT_SYMBOL(drm_fbdev_cma_create_with_funcs);
@@ -582,3 +596,18 @@ void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma)
582 drm_fb_helper_hotplug_event(&fbdev_cma->fb_helper); 596 drm_fb_helper_hotplug_event(&fbdev_cma->fb_helper);
583} 597}
584EXPORT_SYMBOL_GPL(drm_fbdev_cma_hotplug_event); 598EXPORT_SYMBOL_GPL(drm_fbdev_cma_hotplug_event);
599
600/**
601 * drm_fbdev_cma_set_suspend - wrapper around drm_fb_helper_set_suspend
602 * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
603 * @state: desired state, zero to resume, non-zero to suspend
604 *
605 * Calls drm_fb_helper_set_suspend, which is a wrapper around
606 * fb_set_suspend implemented by fbdev core.
607 */
608void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, int state)
609{
610 if (fbdev_cma)
611 drm_fb_helper_set_suspend(&fbdev_cma->fb_helper, state);
612}
613EXPORT_SYMBOL(drm_fbdev_cma_set_suspend);
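A minimal sketch of the intended call sites for the new wrapper, assuming hypothetical driver PM callbacks and a priv->fbdev field:

    static int my_pm_suspend(struct device *dev)
    {
            struct my_priv *priv = dev_get_drvdata(dev);

            drm_fbdev_cma_set_suspend(priv->fbdev, 1);  /* NULL fbdev is fine */
            return 0;
    }

    static int my_pm_resume(struct device *dev)
    {
            struct my_priv *priv = dev_get_drvdata(dev);

            drm_fbdev_cma_set_suspend(priv->fbdev, 0);
            return 0;
    }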
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 7c2eb75db60f..ce54e985d91b 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -227,7 +227,7 @@ static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
227 g_base = r_base + crtc->gamma_size; 227 g_base = r_base + crtc->gamma_size;
228 b_base = g_base + crtc->gamma_size; 228 b_base = g_base + crtc->gamma_size;
229 229
230 crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size); 230 crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size);
231} 231}
232 232
233/** 233/**
@@ -385,7 +385,7 @@ static int restore_fbdev_mode(struct drm_fb_helper *fb_helper)
385 385
386 drm_warn_on_modeset_not_all_locked(dev); 386 drm_warn_on_modeset_not_all_locked(dev);
387 387
388 if (fb_helper->atomic) 388 if (dev->mode_config.funcs->atomic_commit)
389 return restore_fbdev_mode_atomic(fb_helper); 389 return restore_fbdev_mode_atomic(fb_helper);
390 390
391 drm_for_each_plane(plane, dev) { 391 drm_for_each_plane(plane, dev) {
@@ -464,7 +464,7 @@ static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
464 464
465 /* Sometimes user space wants everything disabled, so don't steal the 465 /* Sometimes user space wants everything disabled, so don't steal the
466 * display if there's a master. */ 466 * display if there's a master. */
467 if (dev->primary->master) 467 if (lockless_dereference(dev->master))
468 return false; 468 return false;
469 469
470 drm_for_each_crtc(crtc, dev) { 470 drm_for_each_crtc(crtc, dev) {
@@ -716,8 +716,6 @@ int drm_fb_helper_init(struct drm_device *dev,
716 i++; 716 i++;
717 } 717 }
718 718
719 fb_helper->atomic = !!drm_core_check_feature(dev, DRIVER_ATOMIC);
720
721 return 0; 719 return 0;
722out_free: 720out_free:
723 drm_fb_helper_crtc_free(fb_helper); 721 drm_fb_helper_crtc_free(fb_helper);
@@ -1042,7 +1040,6 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
1042{ 1040{
1043 struct drm_fb_helper *fb_helper = info->par; 1041 struct drm_fb_helper *fb_helper = info->par;
1044 struct drm_framebuffer *fb = fb_helper->fb; 1042 struct drm_framebuffer *fb = fb_helper->fb;
1045 int pindex;
1046 1043
1047 if (info->fix.visual == FB_VISUAL_TRUECOLOR) { 1044 if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
1048 u32 *palette; 1045 u32 *palette;
@@ -1074,38 +1071,10 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
1074 !fb_helper->funcs->gamma_get)) 1071 !fb_helper->funcs->gamma_get))
1075 return -EINVAL; 1072 return -EINVAL;
1076 1073
1077 pindex = regno; 1074 WARN_ON(fb->bits_per_pixel != 8);
1078
1079 if (fb->bits_per_pixel == 16) {
1080 pindex = regno << 3;
1081
1082 if (fb->depth == 16 && regno > 63)
1083 return -EINVAL;
1084 if (fb->depth == 15 && regno > 31)
1085 return -EINVAL;
1086
1087 if (fb->depth == 16) {
1088 u16 r, g, b;
1089 int i;
1090 if (regno < 32) {
1091 for (i = 0; i < 8; i++)
1092 fb_helper->funcs->gamma_set(crtc, red,
1093 green, blue, pindex + i);
1094 }
1095 1075
1096 fb_helper->funcs->gamma_get(crtc, &r, 1076 fb_helper->funcs->gamma_set(crtc, red, green, blue, regno);
1097 &g, &b,
1098 pindex >> 1);
1099 1077
1100 for (i = 0; i < 4; i++)
1101 fb_helper->funcs->gamma_set(crtc, r,
1102 green, b,
1103 (pindex >> 1) + i);
1104 }
1105 }
1106
1107 if (fb->depth != 16)
1108 fb_helper->funcs->gamma_set(crtc, red, green, blue, pindex);
1109 return 0; 1078 return 0;
1110} 1079}
1111 1080
@@ -1373,7 +1342,7 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
1373 return -EBUSY; 1342 return -EBUSY;
1374 } 1343 }
1375 1344
1376 if (fb_helper->atomic) { 1345 if (dev->mode_config.funcs->atomic_commit) {
1377 ret = pan_display_atomic(var, info); 1346 ret = pan_display_atomic(var, info);
1378 goto unlock; 1347 goto unlock;
1379 } 1348 }
@@ -2000,7 +1969,18 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
2000 my_score++; 1969 my_score++;
2001 1970
2002 connector_funcs = connector->helper_private; 1971 connector_funcs = connector->helper_private;
2003 encoder = connector_funcs->best_encoder(connector); 1972
1973 /*
1974 * If the DRM device implements atomic hooks and ->best_encoder() is
1975 * NULL we fallback to the default drm_atomic_helper_best_encoder()
1976 * helper.
1977 */
1978 if (fb_helper->dev->mode_config.funcs->atomic_commit &&
1979 !connector_funcs->best_encoder)
1980 encoder = drm_atomic_helper_best_encoder(connector);
1981 else
1982 encoder = connector_funcs->best_encoder(connector);
1983
2004 if (!encoder) 1984 if (!encoder)
2005 goto out; 1985 goto out;
2006 1986
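With the fallback added above, an atomic driver's connector helpers can simply leave the hook out; a sketch (the my_* names are hypothetical):

    static const struct drm_connector_helper_funcs my_connector_helper_funcs = {
            .get_modes = my_connector_get_modes,
            /* .best_encoder omitted: for atomic drivers the core now falls
             * back to drm_atomic_helper_best_encoder(). */
    };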
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 7af7f8bcb355..323c238fcac7 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -40,6 +40,7 @@
40#include <linux/module.h> 40#include <linux/module.h>
41#include "drm_legacy.h" 41#include "drm_legacy.h"
42#include "drm_internal.h" 42#include "drm_internal.h"
43#include "drm_crtc_internal.h"
43 44
44/* from BKL pushdown */ 45/* from BKL pushdown */
45DEFINE_MUTEX(drm_global_mutex); 46DEFINE_MUTEX(drm_global_mutex);
@@ -67,7 +68,7 @@ DEFINE_MUTEX(drm_global_mutex);
67 * specific implementations. For GEM-based drivers this is drm_gem_mmap(). 68 * specific implementations. For GEM-based drivers this is drm_gem_mmap().
68 * 69 *
69 * No other file operations are supported by the DRM userspace API. Overall the 70 * No other file operations are supported by the DRM userspace API. Overall the
70 * following is an example #file_operations structure: 71 * following is an example #file_operations structure::
71 * 72 *
72 * static const example_drm_fops = { 73 * static const example_drm_fops = {
73 * .owner = THIS_MODULE, 74 * .owner = THIS_MODULE,
@@ -168,60 +169,6 @@ static int drm_cpu_valid(void)
168} 169}
169 170
170/* 171/*
171 * drm_new_set_master - Allocate a new master object and become master for the
172 * associated master realm.
173 *
174 * @dev: The associated device.
175 * @fpriv: File private identifying the client.
176 *
177 * This function must be called with dev::struct_mutex held.
178 * Returns negative error code on failure. Zero on success.
179 */
180int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
181{
182 struct drm_master *old_master;
183 int ret;
184
185 lockdep_assert_held_once(&dev->master_mutex);
186
187 /* create a new master */
188 fpriv->minor->master = drm_master_create(fpriv->minor);
189 if (!fpriv->minor->master)
190 return -ENOMEM;
191
192 /* take another reference for the copy in the local file priv */
193 old_master = fpriv->master;
194 fpriv->master = drm_master_get(fpriv->minor->master);
195
196 if (dev->driver->master_create) {
197 ret = dev->driver->master_create(dev, fpriv->master);
198 if (ret)
199 goto out_err;
200 }
201 if (dev->driver->master_set) {
202 ret = dev->driver->master_set(dev, fpriv, true);
203 if (ret)
204 goto out_err;
205 }
206
207 fpriv->is_master = 1;
208 fpriv->allowed_master = 1;
209 fpriv->authenticated = 1;
210 if (old_master)
211 drm_master_put(&old_master);
212
213 return 0;
214
215out_err:
216 /* drop both references and restore old master on failure */
217 drm_master_put(&fpriv->minor->master);
218 drm_master_put(&fpriv->master);
219 fpriv->master = old_master;
220
221 return ret;
222}
223
224/*
225 * Called whenever a process opens /dev/drm. 172 * Called whenever a process opens /dev/drm.
226 * 173 *
227 * \param filp file pointer. 174 * \param filp file pointer.
@@ -283,19 +230,11 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
283 goto out_prime_destroy; 230 goto out_prime_destroy;
284 } 231 }
285 232
286 /* if there is no current master make this fd it, but do not create 233 if (drm_is_primary_client(priv)) {
287 * any master object for render clients */ 234 ret = drm_master_open(priv);
288 mutex_lock(&dev->master_mutex);
289 if (drm_is_primary_client(priv) && !priv->minor->master) {
290 /* create a new master */
291 ret = drm_new_set_master(dev, priv);
292 if (ret) 235 if (ret)
293 goto out_close; 236 goto out_close;
294 } else if (drm_is_primary_client(priv)) {
295 /* get a reference to the master */
296 priv->master = drm_master_get(priv->minor->master);
297 } 237 }
298 mutex_unlock(&dev->master_mutex);
299 238
300 mutex_lock(&dev->filelist_mutex); 239 mutex_lock(&dev->filelist_mutex);
301 list_add(&priv->lhead, &dev->filelist); 240 list_add(&priv->lhead, &dev->filelist);
@@ -324,7 +263,6 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
324 return 0; 263 return 0;
325 264
326out_close: 265out_close:
327 mutex_unlock(&dev->master_mutex);
328 if (dev->driver->postclose) 266 if (dev->driver->postclose)
329 dev->driver->postclose(dev, priv); 267 dev->driver->postclose(dev, priv);
330out_prime_destroy: 268out_prime_destroy:
@@ -338,18 +276,6 @@ out_prime_destroy:
338 return ret; 276 return ret;
339} 277}
340 278
341static void drm_master_release(struct drm_device *dev, struct file *filp)
342{
343 struct drm_file *file_priv = filp->private_data;
344
345 if (drm_legacy_i_have_hw_lock(dev, file_priv)) {
346 DRM_DEBUG("File %p released, freeing lock for context %d\n",
347 filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
348 drm_legacy_lock_free(&file_priv->master->lock,
349 _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
350 }
351}
352
353static void drm_events_release(struct drm_file *file_priv) 279static void drm_events_release(struct drm_file *file_priv)
354{ 280{
355 struct drm_device *dev = file_priv->minor->dev; 281 struct drm_device *dev = file_priv->minor->dev;
@@ -368,7 +294,7 @@ static void drm_events_release(struct drm_file *file_priv)
368 /* Remove unconsumed events */ 294 /* Remove unconsumed events */
369 list_for_each_entry_safe(e, et, &file_priv->event_list, link) { 295 list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
370 list_del(&e->link); 296 list_del(&e->link);
371 e->destroy(e); 297 kfree(e);
372 } 298 }
373 299
374 spin_unlock_irqrestore(&dev->event_lock, flags); 300 spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -451,11 +377,6 @@ int drm_release(struct inode *inode, struct file *filp)
451 list_del(&file_priv->lhead); 377 list_del(&file_priv->lhead);
452 mutex_unlock(&dev->filelist_mutex); 378 mutex_unlock(&dev->filelist_mutex);
453 379
454 mutex_lock(&dev->struct_mutex);
455 if (file_priv->magic)
456 idr_remove(&file_priv->master->magic_map, file_priv->magic);
457 mutex_unlock(&dev->struct_mutex);
458
459 if (dev->driver->preclose) 380 if (dev->driver->preclose)
460 dev->driver->preclose(dev, file_priv); 381 dev->driver->preclose(dev, file_priv);
461 382
@@ -468,9 +389,8 @@ int drm_release(struct inode *inode, struct file *filp)
468 (long)old_encode_dev(file_priv->minor->kdev->devt), 389 (long)old_encode_dev(file_priv->minor->kdev->devt),
469 dev->open_count); 390 dev->open_count);
470 391
471 /* if the master has gone away we can't do anything with the lock */ 392 if (!drm_core_check_feature(dev, DRIVER_MODESET))
472 if (file_priv->minor->master) 393 drm_legacy_lock_release(dev, filp);
473 drm_master_release(dev, filp);
474 394
475 if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) 395 if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
476 drm_legacy_reclaim_buffers(dev, file_priv); 396 drm_legacy_reclaim_buffers(dev, file_priv);
@@ -487,43 +407,12 @@ int drm_release(struct inode *inode, struct file *filp)
487 407
488 drm_legacy_ctxbitmap_flush(dev, file_priv); 408 drm_legacy_ctxbitmap_flush(dev, file_priv);
489 409
490 mutex_lock(&dev->master_mutex); 410 if (drm_is_primary_client(file_priv))
491 411 drm_master_release(file_priv);
492 if (file_priv->is_master) {
493 struct drm_master *master = file_priv->master;
494
495 /*
496 * Since the master is disappearing, so is the
497 * possibility to lock.
498 */
499 mutex_lock(&dev->struct_mutex);
500 if (master->lock.hw_lock) {
501 if (dev->sigdata.lock == master->lock.hw_lock)
502 dev->sigdata.lock = NULL;
503 master->lock.hw_lock = NULL;
504 master->lock.file_priv = NULL;
505 wake_up_interruptible_all(&master->lock.lock_queue);
506 }
507 mutex_unlock(&dev->struct_mutex);
508
509 if (file_priv->minor->master == file_priv->master) {
510 /* drop the reference held by the minor */
511 if (dev->driver->master_drop)
512 dev->driver->master_drop(dev, file_priv, true);
513 drm_master_put(&file_priv->minor->master);
514 }
515 }
516
517 /* drop the master reference held by the file priv */
518 if (file_priv->master)
519 drm_master_put(&file_priv->master);
520 file_priv->is_master = 0;
521 mutex_unlock(&dev->master_mutex);
522 412
523 if (dev->driver->postclose) 413 if (dev->driver->postclose)
524 dev->driver->postclose(dev, file_priv); 414 dev->driver->postclose(dev, file_priv);
525 415
526
527 if (drm_core_check_feature(dev, DRIVER_PRIME)) 416 if (drm_core_check_feature(dev, DRIVER_PRIME))
528 drm_prime_destroy_file_private(&file_priv->prime); 417 drm_prime_destroy_file_private(&file_priv->prime);
529 418
@@ -636,7 +525,7 @@ put_back_event:
636 } 525 }
637 526
638 ret += length; 527 ret += length;
639 e->destroy(e); 528 kfree(e);
640 } 529 }
641 } 530 }
642 mutex_unlock(&file_priv->event_read_lock); 531 mutex_unlock(&file_priv->event_read_lock);
@@ -713,9 +602,6 @@ int drm_event_reserve_init_locked(struct drm_device *dev,
713 list_add(&p->pending_link, &file_priv->pending_event_list); 602 list_add(&p->pending_link, &file_priv->pending_event_list);
714 p->file_priv = file_priv; 603 p->file_priv = file_priv;
715 604
716 /* we *could* pass this in as arg, but everyone uses kfree: */
717 p->destroy = (void (*) (struct drm_pending_event *)) kfree;
718
719 return 0; 605 return 0;
720} 606}
721EXPORT_SYMBOL(drm_event_reserve_init_locked); 607EXPORT_SYMBOL(drm_event_reserve_init_locked);
@@ -778,7 +664,7 @@ void drm_event_cancel_free(struct drm_device *dev,
778 list_del(&p->pending_link); 664 list_del(&p->pending_link);
779 } 665 }
780 spin_unlock_irqrestore(&dev->event_lock, flags); 666 spin_unlock_irqrestore(&dev->event_lock, flags);
781 p->destroy(p); 667 kfree(p);
782} 668}
783EXPORT_SYMBOL(drm_event_cancel_free); 669EXPORT_SYMBOL(drm_event_cancel_free);
784 670
@@ -800,8 +686,19 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
800{ 686{
801 assert_spin_locked(&dev->event_lock); 687 assert_spin_locked(&dev->event_lock);
802 688
689 if (e->completion) {
690 /* ->completion might disappear as soon as it signalled. */
691 complete_all(e->completion);
692 e->completion = NULL;
693 }
694
695 if (e->fence) {
696 fence_signal(e->fence);
697 fence_put(e->fence);
698 }
699
803 if (!e->file_priv) { 700 if (!e->file_priv) {
804 e->destroy(e); 701 kfree(e);
805 return; 702 return;
806 } 703 }
807 704
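Since events are now plain kfree()d by the core and may carry a completion or fence, a driver-side sketch of queueing one looks like this (flip-style event; the surrounding driver code is hypothetical):

    struct drm_pending_vblank_event *e = kzalloc(sizeof(*e), GFP_KERNEL);
    int ret;

    if (!e)
            return -ENOMEM;

    e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
    e->event.base.length = sizeof(e->event);
    e->event.user_data = user_data;

    ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
    if (ret) {
            kfree(e);                   /* no e->destroy() hook to call anymore */
            return ret;
    }

    /* later, with dev->event_lock held (e.g. from the flip IRQ): */
    drm_send_event_locked(dev, &e->base);  /* the core kfree()s it after delivery */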
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
new file mode 100644
index 000000000000..0645c85d5f95
--- /dev/null
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -0,0 +1,320 @@
1/*
2 * Copyright (c) 2016 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
3 *
4 * DRM core format related functions
5 *
6 * Permission to use, copy, modify, distribute, and sell this software and its
7 * documentation for any purpose is hereby granted without fee, provided that
8 * the above copyright notice appear in all copies and that both that copyright
9 * notice and this permission notice appear in supporting documentation, and
10 * that the name of the copyright holders not be used in advertising or
11 * publicity pertaining to distribution of the software without specific,
12 * written prior permission. The copyright holders make no representations
13 * about the suitability of this software for any purpose. It is provided "as
14 * is" without express or implied warranty.
15 *
16 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
17 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
18 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
19 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
20 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
21 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
22 * OF THIS SOFTWARE.
23 */
24
25#include <linux/bug.h>
26#include <linux/ctype.h>
27#include <linux/export.h>
28#include <linux/kernel.h>
29
30#include <drm/drmP.h>
31#include <drm/drm_fourcc.h>
32
33static char printable_char(int c)
34{
35 return isascii(c) && isprint(c) ? c : '?';
36}
37
38/**
39 * drm_get_format_name - return a string for drm fourcc format
40 * @format: format to compute name of
41 *
42 * Note that the buffer used by this function is globally shared and owned by
43 * the function itself.
44 *
45 * FIXME: This isn't really multithreading safe.
46 */
47const char *drm_get_format_name(uint32_t format)
48{
49 static char buf[32];
50
51 snprintf(buf, sizeof(buf),
52 "%c%c%c%c %s-endian (0x%08x)",
53 printable_char(format & 0xff),
54 printable_char((format >> 8) & 0xff),
55 printable_char((format >> 16) & 0xff),
56 printable_char((format >> 24) & 0x7f),
57 format & DRM_FORMAT_BIG_ENDIAN ? "big" : "little",
58 format);
59
60 return buf;
61}
62EXPORT_SYMBOL(drm_get_format_name);
63
64/**
65 * drm_fb_get_bpp_depth - get the bpp/depth values for format
66 * @format: pixel format (DRM_FORMAT_*)
67 * @depth: storage for the depth value
68 * @bpp: storage for the bpp value
69 *
70 * This only supports RGB formats here for compat with code that doesn't use
71 * pixel formats directly yet.
72 */
73void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
74 int *bpp)
75{
76 switch (format) {
77 case DRM_FORMAT_C8:
78 case DRM_FORMAT_RGB332:
79 case DRM_FORMAT_BGR233:
80 *depth = 8;
81 *bpp = 8;
82 break;
83 case DRM_FORMAT_XRGB1555:
84 case DRM_FORMAT_XBGR1555:
85 case DRM_FORMAT_RGBX5551:
86 case DRM_FORMAT_BGRX5551:
87 case DRM_FORMAT_ARGB1555:
88 case DRM_FORMAT_ABGR1555:
89 case DRM_FORMAT_RGBA5551:
90 case DRM_FORMAT_BGRA5551:
91 *depth = 15;
92 *bpp = 16;
93 break;
94 case DRM_FORMAT_RGB565:
95 case DRM_FORMAT_BGR565:
96 *depth = 16;
97 *bpp = 16;
98 break;
99 case DRM_FORMAT_RGB888:
100 case DRM_FORMAT_BGR888:
101 *depth = 24;
102 *bpp = 24;
103 break;
104 case DRM_FORMAT_XRGB8888:
105 case DRM_FORMAT_XBGR8888:
106 case DRM_FORMAT_RGBX8888:
107 case DRM_FORMAT_BGRX8888:
108 *depth = 24;
109 *bpp = 32;
110 break;
111 case DRM_FORMAT_XRGB2101010:
112 case DRM_FORMAT_XBGR2101010:
113 case DRM_FORMAT_RGBX1010102:
114 case DRM_FORMAT_BGRX1010102:
115 case DRM_FORMAT_ARGB2101010:
116 case DRM_FORMAT_ABGR2101010:
117 case DRM_FORMAT_RGBA1010102:
118 case DRM_FORMAT_BGRA1010102:
119 *depth = 30;
120 *bpp = 32;
121 break;
122 case DRM_FORMAT_ARGB8888:
123 case DRM_FORMAT_ABGR8888:
124 case DRM_FORMAT_RGBA8888:
125 case DRM_FORMAT_BGRA8888:
126 *depth = 32;
127 *bpp = 32;
128 break;
129 default:
130 DRM_DEBUG_KMS("unsupported pixel format %s\n",
131 drm_get_format_name(format));
132 *depth = 0;
133 *bpp = 0;
134 break;
135 }
136}
137EXPORT_SYMBOL(drm_fb_get_bpp_depth);
138
139/**
140 * drm_format_num_planes - get the number of planes for format
141 * @format: pixel format (DRM_FORMAT_*)
142 *
143 * Returns:
144 * The number of planes used by the specified pixel format.
145 */
146int drm_format_num_planes(uint32_t format)
147{
148 switch (format) {
149 case DRM_FORMAT_YUV410:
150 case DRM_FORMAT_YVU410:
151 case DRM_FORMAT_YUV411:
152 case DRM_FORMAT_YVU411:
153 case DRM_FORMAT_YUV420:
154 case DRM_FORMAT_YVU420:
155 case DRM_FORMAT_YUV422:
156 case DRM_FORMAT_YVU422:
157 case DRM_FORMAT_YUV444:
158 case DRM_FORMAT_YVU444:
159 return 3;
160 case DRM_FORMAT_NV12:
161 case DRM_FORMAT_NV21:
162 case DRM_FORMAT_NV16:
163 case DRM_FORMAT_NV61:
164 case DRM_FORMAT_NV24:
165 case DRM_FORMAT_NV42:
166 return 2;
167 default:
168 return 1;
169 }
170}
171EXPORT_SYMBOL(drm_format_num_planes);
172
173/**
174 * drm_format_plane_cpp - determine the bytes per pixel value
175 * @format: pixel format (DRM_FORMAT_*)
176 * @plane: plane index
177 *
178 * Returns:
179 * The bytes per pixel value for the specified plane.
180 */
181int drm_format_plane_cpp(uint32_t format, int plane)
182{
183 unsigned int depth;
184 int bpp;
185
186 if (plane >= drm_format_num_planes(format))
187 return 0;
188
189 switch (format) {
190 case DRM_FORMAT_YUYV:
191 case DRM_FORMAT_YVYU:
192 case DRM_FORMAT_UYVY:
193 case DRM_FORMAT_VYUY:
194 return 2;
195 case DRM_FORMAT_NV12:
196 case DRM_FORMAT_NV21:
197 case DRM_FORMAT_NV16:
198 case DRM_FORMAT_NV61:
199 case DRM_FORMAT_NV24:
200 case DRM_FORMAT_NV42:
201 return plane ? 2 : 1;
202 case DRM_FORMAT_YUV410:
203 case DRM_FORMAT_YVU410:
204 case DRM_FORMAT_YUV411:
205 case DRM_FORMAT_YVU411:
206 case DRM_FORMAT_YUV420:
207 case DRM_FORMAT_YVU420:
208 case DRM_FORMAT_YUV422:
209 case DRM_FORMAT_YVU422:
210 case DRM_FORMAT_YUV444:
211 case DRM_FORMAT_YVU444:
212 return 1;
213 default:
214 drm_fb_get_bpp_depth(format, &depth, &bpp);
215 return bpp >> 3;
216 }
217}
218EXPORT_SYMBOL(drm_format_plane_cpp);
219
220/**
221 * drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor
222 * @format: pixel format (DRM_FORMAT_*)
223 *
224 * Returns:
225 * The horizontal chroma subsampling factor for the
226 * specified pixel format.
227 */
228int drm_format_horz_chroma_subsampling(uint32_t format)
229{
230 switch (format) {
231 case DRM_FORMAT_YUV411:
232 case DRM_FORMAT_YVU411:
233 case DRM_FORMAT_YUV410:
234 case DRM_FORMAT_YVU410:
235 return 4;
236 case DRM_FORMAT_YUYV:
237 case DRM_FORMAT_YVYU:
238 case DRM_FORMAT_UYVY:
239 case DRM_FORMAT_VYUY:
240 case DRM_FORMAT_NV12:
241 case DRM_FORMAT_NV21:
242 case DRM_FORMAT_NV16:
243 case DRM_FORMAT_NV61:
244 case DRM_FORMAT_YUV422:
245 case DRM_FORMAT_YVU422:
246 case DRM_FORMAT_YUV420:
247 case DRM_FORMAT_YVU420:
248 return 2;
249 default:
250 return 1;
251 }
252}
253EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);
254
255/**
256 * drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor
257 * @format: pixel format (DRM_FORMAT_*)
258 *
259 * Returns:
260 * The vertical chroma subsampling factor for the
261 * specified pixel format.
262 */
263int drm_format_vert_chroma_subsampling(uint32_t format)
264{
265 switch (format) {
266 case DRM_FORMAT_YUV410:
267 case DRM_FORMAT_YVU410:
268 return 4;
269 case DRM_FORMAT_YUV420:
270 case DRM_FORMAT_YVU420:
271 case DRM_FORMAT_NV12:
272 case DRM_FORMAT_NV21:
273 return 2;
274 default:
275 return 1;
276 }
277}
278EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
279
280/**
281 * drm_format_plane_width - width of the plane given the first plane
282 * @width: width of the first plane
283 * @format: pixel format
284 * @plane: plane index
285 *
286 * Returns:
287 * The width of @plane, given that the width of the first plane is @width.
288 */
289int drm_format_plane_width(int width, uint32_t format, int plane)
290{
291 if (plane >= drm_format_num_planes(format))
292 return 0;
293
294 if (plane == 0)
295 return width;
296
297 return width / drm_format_horz_chroma_subsampling(format);
298}
299EXPORT_SYMBOL(drm_format_plane_width);
300
301/**
302 * drm_format_plane_height - height of the plane given the first plane
303 * @height: height of the first plane
304 * @format: pixel format
305 * @plane: plane index
306 *
307 * Returns:
308 * The height of @plane, given that the height of the first plane is @height.
309 */
310int drm_format_plane_height(int height, uint32_t format, int plane)
311{
312 if (plane >= drm_format_num_planes(format))
313 return 0;
314
315 if (plane == 0)
316 return height;
317
318 return height / drm_format_vert_chroma_subsampling(format);
319}
320EXPORT_SYMBOL(drm_format_plane_height);
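Taken together, the new helpers make per-plane layout computations trivial; an illustrative NV12 example (the values in comments follow directly from the tables above):

    uint32_t format = DRM_FORMAT_NV12;
    unsigned int pitches[4] = { 0 };
    int i, nplanes = drm_format_num_planes(format);            /* 2 */

    for (i = 0; i < nplanes; i++) {
            int w = drm_format_plane_width(1920, format, i);   /* 1920, 960 */
            int cpp = drm_format_plane_cpp(format, i);         /* 1, 2 */

            pitches[i] = w * cpp;                              /* 1920, 1920 */
    }
    DRM_DEBUG_KMS("layout for %s\n", drm_get_format_name(format));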
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 32156060b9c9..5c19dde1cd31 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -787,7 +787,7 @@ EXPORT_SYMBOL(drm_gem_object_release);
787 * @kref: kref of the object to free 787 * @kref: kref of the object to free
788 * 788 *
789 * Called after the last reference to the object has been lost. 789 * Called after the last reference to the object has been lost.
790 * Must be called holding struct_ mutex 790 * Must be called holding &drm_device->struct_mutex.
791 * 791 *
792 * Frees the object 792 * Frees the object
793 */ 793 */
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index e1ab008b3f08..1d6c335584ec 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -121,7 +121,7 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
121 return cma_obj; 121 return cma_obj;
122 122
123error: 123error:
124 drm->driver->gem_free_object(&cma_obj->base); 124 drm_gem_object_unreference_unlocked(&cma_obj->base);
125 return ERR_PTR(ret); 125 return ERR_PTR(ret);
126} 126}
127EXPORT_SYMBOL_GPL(drm_gem_cma_create); 127EXPORT_SYMBOL_GPL(drm_gem_cma_create);
@@ -162,18 +162,12 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv,
162 * and the handle has the id that userspace can see. 162 * and the handle has the id that userspace can see.
163 */ 163 */
164 ret = drm_gem_handle_create(file_priv, gem_obj, handle); 164 ret = drm_gem_handle_create(file_priv, gem_obj, handle);
165 if (ret)
166 goto err_handle_create;
167
168 /* drop reference from allocate - handle holds it now. */ 165 /* drop reference from allocate - handle holds it now. */
169 drm_gem_object_unreference_unlocked(gem_obj); 166 drm_gem_object_unreference_unlocked(gem_obj);
167 if (ret)
168 return ERR_PTR(ret);
170 169
171 return cma_obj; 170 return cma_obj;
172
173err_handle_create:
174 drm->driver->gem_free_object(gem_obj);
175
176 return ERR_PTR(ret);
177} 171}
178 172
179/** 173/**
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 5d469b2f26f4..9ae353f4dd06 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -50,106 +50,24 @@ int drm_name_info(struct seq_file *m, void *data)
50 struct drm_info_node *node = (struct drm_info_node *) m->private; 50 struct drm_info_node *node = (struct drm_info_node *) m->private;
51 struct drm_minor *minor = node->minor; 51 struct drm_minor *minor = node->minor;
52 struct drm_device *dev = minor->dev; 52 struct drm_device *dev = minor->dev;
53 struct drm_master *master = minor->master; 53 struct drm_master *master;
54 if (!master)
55 return 0;
56
57 if (master->unique) {
58 seq_printf(m, "%s %s %s\n",
59 dev->driver->name,
60 dev_name(dev->dev), master->unique);
61 } else {
62 seq_printf(m, "%s %s\n",
63 dev->driver->name, dev_name(dev->dev));
64 }
65 return 0;
66}
67
68/**
69 * Called when "/proc/dri/.../vm" is read.
70 *
71 * Prints information about all mappings in drm_device::maplist.
72 */
73int drm_vm_info(struct seq_file *m, void *data)
74{
75 struct drm_info_node *node = (struct drm_info_node *) m->private;
76 struct drm_device *dev = node->minor->dev;
77 struct drm_local_map *map;
78 struct drm_map_list *r_list;
79
80 /* Hardcoded from _DRM_FRAME_BUFFER,
81 _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
82 _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
83 const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
84 const char *type;
85 int i;
86
87 mutex_lock(&dev->struct_mutex);
88 seq_printf(m, "slot offset size type flags address mtrr\n\n");
89 i = 0;
90 list_for_each_entry(r_list, &dev->maplist, head) {
91 map = r_list->map;
92 if (!map)
93 continue;
94 if (map->type < 0 || map->type > 5)
95 type = "??";
96 else
97 type = types[map->type];
98
99 seq_printf(m, "%4d 0x%016llx 0x%08lx %4.4s 0x%02x 0x%08lx ",
100 i,
101 (unsigned long long)map->offset,
102 map->size, type, map->flags,
103 (unsigned long) r_list->user_token);
104 if (map->mtrr < 0)
105 seq_printf(m, "none\n");
106 else
107 seq_printf(m, "%4d\n", map->mtrr);
108 i++;
109 }
110 mutex_unlock(&dev->struct_mutex);
111 return 0;
112}
113 54
114/** 55 mutex_lock(&dev->master_mutex);
115 * Called when "/proc/dri/.../bufs" is read. 56 master = dev->master;
116 */ 57 if (!master)
117int drm_bufs_info(struct seq_file *m, void *data) 58 goto out_unlock;
118{ 59
119 struct drm_info_node *node = (struct drm_info_node *) m->private; 60 seq_printf(m, "%s", dev->driver->name);
120 struct drm_device *dev = node->minor->dev; 61 if (dev->dev)
121 struct drm_device_dma *dma; 62 seq_printf(m, " dev=%s", dev_name(dev->dev));
122 int i, seg_pages; 63 if (master && master->unique)
123 64 seq_printf(m, " master=%s", master->unique);
124 mutex_lock(&dev->struct_mutex); 65 if (dev->unique)
125 dma = dev->dma; 66 seq_printf(m, " unique=%s", dev->unique);
126 if (!dma) {
127 mutex_unlock(&dev->struct_mutex);
128 return 0;
129 }
130
131 seq_printf(m, " o size count free segs pages kB\n\n");
132 for (i = 0; i <= DRM_MAX_ORDER; i++) {
133 if (dma->bufs[i].buf_count) {
134 seg_pages = dma->bufs[i].seg_count * (1 << dma->bufs[i].page_order);
135 seq_printf(m, "%2d %8d %5d %5d %5d %5d %5ld\n",
136 i,
137 dma->bufs[i].buf_size,
138 dma->bufs[i].buf_count,
139 0,
140 dma->bufs[i].seg_count,
141 seg_pages,
142 seg_pages * PAGE_SIZE / 1024);
143 }
144 }
145 seq_printf(m, "\n");
146 for (i = 0; i < dma->buf_count; i++) {
147 if (i && !(i % 32))
148 seq_printf(m, "\n");
149 seq_printf(m, " %d", dma->buflist[i]->list);
150 }
151 seq_printf(m, "\n"); 67 seq_printf(m, "\n");
152 mutex_unlock(&dev->struct_mutex); 68out_unlock:
69 mutex_unlock(&dev->master_mutex);
70
153 return 0; 71 return 0;
154} 72}
155 73
@@ -184,7 +102,7 @@ int drm_clients_info(struct seq_file *m, void *data)
184 task ? task->comm : "<unknown>", 102 task ? task->comm : "<unknown>",
185 pid_vnr(priv->pid), 103 pid_vnr(priv->pid),
186 priv->minor->index, 104 priv->minor->index,
187 priv->is_master ? 'y' : 'n', 105 drm_is_current_master(priv) ? 'y' : 'n',
188 priv->authenticated ? 'y' : 'n', 106 priv->authenticated ? 'y' : 'n',
189 from_kuid_munged(seq_user_ns(m), priv->uid), 107 from_kuid_munged(seq_user_ns(m), priv->uid),
190 priv->magic); 108 priv->magic);
@@ -194,7 +112,6 @@ int drm_clients_info(struct seq_file *m, void *data)
194 return 0; 112 return 0;
195} 113}
196 114
197
198static int drm_gem_one_name_info(int id, void *ptr, void *data) 115static int drm_gem_one_name_info(int id, void *ptr, void *data)
199{ 116{
200 struct drm_gem_object *obj = ptr; 117 struct drm_gem_object *obj = ptr;
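The rewritten drm_name_info() reflects the new rule that dev->master is only valid under dev->master_mutex. A minimal sketch of that access pattern for any seq_file show callback, assuming a hypothetical example_master_info():

	static int example_master_info(struct seq_file *m, void *data)
	{
		struct drm_info_node *node = m->private;
		struct drm_device *dev = node->minor->dev;
		struct drm_master *master;

		/* dev->master can change at any time; dereference it only
		 * while holding dev->master_mutex. */
		mutex_lock(&dev->master_mutex);
		master = dev->master;
		if (master && master->unique)
			seq_printf(m, "master=%s\n", master->unique);
		mutex_unlock(&dev->master_mutex);

		return 0;
	}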
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 902cf6a15212..b86dc9b921a5 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -29,15 +29,9 @@ extern struct mutex drm_global_mutex;
29void drm_lastclose(struct drm_device *dev); 29void drm_lastclose(struct drm_device *dev);
30 30
31/* drm_pci.c */ 31/* drm_pci.c */
32int drm_pci_set_unique(struct drm_device *dev,
33 struct drm_master *master,
34 struct drm_unique *u);
35int drm_irq_by_busid(struct drm_device *dev, void *data, 32int drm_irq_by_busid(struct drm_device *dev, void *data,
36 struct drm_file *file_priv); 33 struct drm_file *file_priv);
37 34
38/* drm_vm.c */
39int drm_vma_info(struct seq_file *m, void *data);
40
41/* drm_prime.c */ 35/* drm_prime.c */
42int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data, 36int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
43 struct drm_file *file_priv); 37 struct drm_file *file_priv);
@@ -51,8 +45,6 @@ void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpr
51 45
52/* drm_info.c */ 46/* drm_info.c */
53int drm_name_info(struct seq_file *m, void *data); 47int drm_name_info(struct seq_file *m, void *data);
54int drm_vm_info(struct seq_file *m, void *data);
55int drm_bufs_info(struct seq_file *m, void *data);
56int drm_clients_info(struct seq_file *m, void* data); 48int drm_clients_info(struct seq_file *m, void* data);
57int drm_gem_name_info(struct seq_file *m, void *data); 49int drm_gem_name_info(struct seq_file *m, void *data);
58 50
@@ -67,6 +59,12 @@ int drm_getmagic(struct drm_device *dev, void *data,
67 struct drm_file *file_priv); 59 struct drm_file *file_priv);
68int drm_authmagic(struct drm_device *dev, void *data, 60int drm_authmagic(struct drm_device *dev, void *data,
69 struct drm_file *file_priv); 61 struct drm_file *file_priv);
62int drm_setmaster_ioctl(struct drm_device *dev, void *data,
63 struct drm_file *file_priv);
64int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
65 struct drm_file *file_priv);
66int drm_master_open(struct drm_file *file_priv);
67void drm_master_release(struct drm_file *file_priv);
70 68
71/* drm_sysfs.c */ 69/* drm_sysfs.c */
72extern struct class *drm_class; 70extern struct class *drm_class;
@@ -92,13 +90,6 @@ int drm_gem_open_ioctl(struct drm_device *dev, void *data,
92void drm_gem_open(struct drm_device *dev, struct drm_file *file_private); 90void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
93void drm_gem_release(struct drm_device *dev, struct drm_file *file_private); 91void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
94 92
95/* drm_drv.c */
96int drm_setmaster_ioctl(struct drm_device *dev, void *data,
97 struct drm_file *file_priv);
98int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
99 struct drm_file *file_priv);
100struct drm_master *drm_master_create(struct drm_minor *minor);
101
102/* drm_debugfs.c */ 93/* drm_debugfs.c */
103#if defined(CONFIG_DEBUG_FS) 94#if defined(CONFIG_DEBUG_FS)
104int drm_debugfs_init(struct drm_minor *minor, int minor_id, 95int drm_debugfs_init(struct drm_minor *minor, int minor_id,
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index b7a39771c152..1f84ff5f1bf8 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -30,6 +30,7 @@
30 30
31#include <drm/drmP.h> 31#include <drm/drmP.h>
32#include <drm/drm_core.h> 32#include <drm/drm_core.h>
33#include <drm/drm_auth.h>
33#include "drm_legacy.h" 34#include "drm_legacy.h"
34#include "drm_internal.h" 35#include "drm_internal.h"
35#include "drm_crtc_internal.h" 36#include "drm_crtc_internal.h"
@@ -37,6 +38,64 @@
37#include <linux/pci.h> 38#include <linux/pci.h>
38#include <linux/export.h> 39#include <linux/export.h>
39 40
41/**
42 * DOC: getunique and setversion story
43 *
44 * BEWARE THE DRAGONS! MIND THE TRAPDOORS!
45 *
46 * In an attempt to warn anyone else who's trying to figure out what's going
47 * on here, I'll try to summarize the story. First things first, let's clear up
48 * the names, because the kernel internals, libdrm and the ioctls are all named
49 * differently:
50 *
51 * - GET_UNIQUE ioctl, implemented by drm_getunique is wrapped up in libdrm
52 * through the drmGetBusid function.
53 * - The libdrm drmSetBusid function is backed by the SET_UNIQUE ioctl. All
 54 * that code is disabled in the kernel with drm_invalid_op().
 55 * - The internal set_busid kernel functions and driver callbacks are
 56 * used exclusively by the SET_VERSION ioctl, because only drm 1.0 (which is
 57 * disabled) allowed userspace to set the busid through the above ioctl.
58 * - Other ioctls and functions involved are named consistently.
59 *
 60 * For anyone wondering what the difference between drm 1.1 and 1.4 is: correct
 61 * handling of pci domains in the busid on ppc. Doing this correctly was only
 62 * implemented in libdrm in 2010, hence it can't be disabled yet. No one knows
 63 * what's special about drm 1.2 and 1.3.
64 *
 65 * Now the actual horror story of how device lookup in drm works. By and large,
 66 * there are two different ways: either by busid, or by device driver name.
67 *
68 * Opening by busid is fairly simple:
69 *
70 * 1. First call SET_VERSION to make sure pci domains are handled properly. As a
71 * side-effect this fills out the unique name in the master structure.
72 * 2. Call GET_UNIQUE to read out the unique name from the master structure,
73 * which matches the busid thanks to step 1. If it doesn't, proceed to try
74 * the next device node.
75 *
76 * Opening by name is slightly different:
77 *
78 * 1. Directly call VERSION to get the version and to match against the driver
79 * name returned by that ioctl. Note that SET_VERSION is not called, which
 80 * means the unique name for the master node just opened is _not_ filled
 81 * out. This is despite the fact that current drm device nodes are always
 82 * bound to one device and can't be runtime-assigned as in drm 1.0.
83 * 2. Match driver name. If it mismatches, proceed to the next device node.
84 * 3. Call GET_UNIQUE, and check whether the unique name has length zero (by
85 * checking that the first byte in the string is 0). If that's not the case
86 * libdrm skips and proceeds to the next device node. Probably this is just
87 * copypasta from drm 1.0 times where a set unique name meant that the driver
88 * was in use already, but that's just conjecture.
89 *
90 * Long story short: To keep the open by name logic working, GET_UNIQUE must
91 * _not_ return a unique string when SET_VERSION hasn't been called yet,
 92 * otherwise libdrm breaks. Even though that unique string can't ever change, and
 93 * is totally irrelevant for actually opening the device, because runtime-
 94 * assignable device instances were only supported in drm 1.0, which is long dead.
95 * But the libdrm code in drmOpenByName somehow survived, hence this can't be
96 * broken.
97 */
98
40static int drm_version(struct drm_device *dev, void *data, 99static int drm_version(struct drm_device *dev, void *data,
41 struct drm_file *file_priv); 100 struct drm_file *file_priv);
42 101
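To make the open-by-busid dance concrete, here is a hedged userspace sketch built on the libdrm calls the comment names (drmSetInterfaceVersion, drmGetBusid); the helper name and error handling are illustrative only:

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>
	#include <xf86drm.h>

	static int open_by_busid(const char *node, const char *busid)
	{
		drmSetVersion sv = {
			.drm_di_major = 1, .drm_di_minor = 4, /* pci domains */
			.drm_dd_major = -1, .drm_dd_minor = -1,
		};
		char *unique;
		int fd, match;

		fd = open(node, O_RDWR);
		if (fd < 0)
			return -1;

		/* Step 1: SET_VERSION fills out the unique name. */
		drmSetInterfaceVersion(fd, &sv);
		/* Step 2: GET_UNIQUE and compare against the wanted busid. */
		unique = drmGetBusid(fd);
		match = unique && strcmp(unique, busid) == 0;
		drmFreeBusid(unique);
		if (match)
			return fd;
		close(fd);
		return -1; /* caller proceeds to the next device node */
	}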
@@ -75,51 +134,6 @@ drm_unset_busid(struct drm_device *dev,
75 master->unique_len = 0; 134 master->unique_len = 0;
76} 135}
77 136
78/*
79 * Set the bus id.
80 *
81 * \param inode device inode.
82 * \param file_priv DRM file private.
83 * \param cmd command.
84 * \param arg user argument, pointing to a drm_unique structure.
85 * \return zero on success or a negative number on failure.
86 *
87 * Copies the bus id from userspace into drm_device::unique, and verifies that
88 * it matches the device this DRM is attached to (EINVAL otherwise). Deprecated
89 * in interface version 1.1 and will return EBUSY when setversion has requested
90 * version 1.1 or greater. Also note that KMS is all version 1.1 and later and
91 * UMS was only ever supported on pci devices.
92 */
93static int drm_setunique(struct drm_device *dev, void *data,
94 struct drm_file *file_priv)
95{
96 struct drm_unique *u = data;
97 struct drm_master *master = file_priv->master;
98 int ret;
99
100 if (master->unique_len || master->unique)
101 return -EBUSY;
102
103 if (!u->unique_len || u->unique_len > 1024)
104 return -EINVAL;
105
106 if (drm_core_check_feature(dev, DRIVER_MODESET))
107 return 0;
108
109 if (WARN_ON(!dev->pdev))
110 return -EINVAL;
111
112 ret = drm_pci_set_unique(dev, master, u);
113 if (ret)
114 goto err;
115
116 return 0;
117
118err:
119 drm_unset_busid(dev, master);
120 return ret;
121}
122
123static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv) 137static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
124{ 138{
125 struct drm_master *master = file_priv->master; 139 struct drm_master *master = file_priv->master;
@@ -135,12 +149,7 @@ static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
135 return ret; 149 return ret;
136 } 150 }
137 } else { 151 } else {
138 if (WARN(dev->unique == NULL, 152 WARN_ON(!dev->unique);
139 "No drm_driver.set_busid() implementation provided by "
140 "%ps. Use drm_dev_set_unique() to set the unique "
141 "name explicitly.", dev->driver))
142 return -EINVAL;
143
144 master->unique = kstrdup(dev->unique, GFP_KERNEL); 153 master->unique = kstrdup(dev->unique, GFP_KERNEL);
145 if (master->unique) 154 if (master->unique)
146 master->unique_len = strlen(dev->unique); 155 master->unique_len = strlen(dev->unique);
@@ -473,7 +482,8 @@ int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
473 return -EACCES; 482 return -EACCES;
474 483
475 /* MASTER is only for master or control clients */ 484 /* MASTER is only for master or control clients */
476 if (unlikely((flags & DRM_MASTER) && !file_priv->is_master && 485 if (unlikely((flags & DRM_MASTER) &&
486 !drm_is_current_master(file_priv) &&
477 !drm_is_control_client(file_priv))) 487 !drm_is_control_client(file_priv)))
478 return -EACCES; 488 return -EACCES;
479 489
@@ -504,7 +514,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
504 DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 514 DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version,
505 DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW), 515 DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW),
506 DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), 516 DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
507 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0), 517 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, DRM_UNLOCKED),
508 DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), 518 DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
509 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, DRM_UNLOCKED), 519 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, DRM_UNLOCKED),
510 DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED), 520 DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
@@ -513,10 +523,10 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
513 DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0), 523 DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0),
514 DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER), 524 DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
515 525
516 DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 526 DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
517 DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 527 DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
518 DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 528 DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
519 DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER), 529 DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_UNLOCKED|DRM_MASTER),
520 530
521 DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_legacy_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 531 DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_legacy_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
522 DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_legacy_rmmap_ioctl, DRM_AUTH), 532 DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_legacy_rmmap_ioctl, DRM_AUTH),
@@ -524,8 +534,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
524 DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 534 DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
525 DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH), 535 DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH),
526 536
527 DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY), 537 DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_UNLOCKED|DRM_ROOT_ONLY),
528 DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY), 538 DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_UNLOCKED|DRM_ROOT_ONLY),
529 539
530 DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY), 540 DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY),
531 DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 541 DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 0fac801c18fe..8ca3d2bf2bda 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -42,10 +42,6 @@
42#include <linux/vgaarb.h> 42#include <linux/vgaarb.h>
43#include <linux/export.h> 43#include <linux/export.h>
44 44
45/* Access macro for slots in vblank timestamp ringbuffer. */
46#define vblanktimestamp(dev, pipe, count) \
47 ((dev)->vblank[pipe].time[(count) % DRM_VBLANKTIME_RBSIZE])
48
49/* Retry timestamp calculation up to 3 times to satisfy 45/* Retry timestamp calculation up to 3 times to satisfy
50 * drm_timestamp_precision before giving up. 46 * drm_timestamp_precision before giving up.
51 */ 47 */
@@ -82,36 +78,18 @@ static void store_vblank(struct drm_device *dev, unsigned int pipe,
82 struct timeval *t_vblank, u32 last) 78 struct timeval *t_vblank, u32 last)
83{ 79{
84 struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 80 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
85 u32 tslot;
86 81
87 assert_spin_locked(&dev->vblank_time_lock); 82 assert_spin_locked(&dev->vblank_time_lock);
88 83
89 vblank->last = last; 84 vblank->last = last;
90 85
91 /* All writers hold the spinlock, but readers are serialized by 86 write_seqlock(&vblank->seqlock);
92 * the latching of vblank->count below. 87 vblank->time = *t_vblank;
93 */
94 tslot = vblank->count + vblank_count_inc;
95 vblanktimestamp(dev, pipe, tslot) = *t_vblank;
96
97 /*
98 * vblank timestamp updates are protected on the write side with
99 * vblank_time_lock, but on the read side done locklessly using a
100 * sequence-lock on the vblank counter. Ensure correct ordering using
101 * memory barrriers. We need the barrier both before and also after the
102 * counter update to synchronize with the next timestamp write.
103 * The read-side barriers for this are in drm_vblank_count_and_time.
104 */
105 smp_wmb();
106 vblank->count += vblank_count_inc; 88 vblank->count += vblank_count_inc;
107 smp_wmb(); 89 write_sequnlock(&vblank->seqlock);
108} 90}
109 91
110/** 92/*
111 * drm_reset_vblank_timestamp - reset the last timestamp to the last vblank
112 * @dev: DRM device
113 * @pipe: index of CRTC for which to reset the timestamp
114 *
115 * Reset the stored timestamp for the current vblank count to correspond 93 * Reset the stored timestamp for the current vblank count to correspond
116 * to the last vblank occurred. 94 * to the last vblank occurred.
117 * 95 *
@@ -155,11 +133,7 @@ static void drm_reset_vblank_timestamp(struct drm_device *dev, unsigned int pipe
155 spin_unlock(&dev->vblank_time_lock); 133 spin_unlock(&dev->vblank_time_lock);
156} 134}
157 135
158/** 136/*
159 * drm_update_vblank_count - update the master vblank counter
160 * @dev: DRM device
161 * @pipe: counter to update
162 *
163 * Call back into the driver to update the appropriate vblank counter 137 * Call back into the driver to update the appropriate vblank counter
164 * (specified by @pipe). Deal with wraparound, if it occurred, and 138 * (specified by @pipe). Deal with wraparound, if it occurred, and
165 * update the last read value so we can deal with wraparound on the next 139 * update the last read value so we can deal with wraparound on the next
@@ -205,7 +179,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
205 const struct timeval *t_old; 179 const struct timeval *t_old;
206 u64 diff_ns; 180 u64 diff_ns;
207 181
208 t_old = &vblanktimestamp(dev, pipe, vblank->count); 182 t_old = &vblank->time;
209 diff_ns = timeval_to_ns(&t_vblank) - timeval_to_ns(t_old); 183 diff_ns = timeval_to_ns(&t_vblank) - timeval_to_ns(t_old);
210 184
211 /* 185 /*
@@ -239,49 +213,6 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
239 diff = 1; 213 diff = 1;
240 } 214 }
241 215
242 /*
243 * FIMXE: Need to replace this hack with proper seqlocks.
244 *
245 * Restrict the bump of the software vblank counter to a safe maximum
246 * value of +1 whenever there is the possibility that concurrent readers
247 * of vblank timestamps could be active at the moment, as the current
248 * implementation of the timestamp caching and updating is not safe
249 * against concurrent readers for calls to store_vblank() with a bump
250 * of anything but +1. A bump != 1 would very likely return corrupted
251 * timestamps to userspace, because the same slot in the cache could
252 * be concurrently written by store_vblank() and read by one of those
253 * readers without the read-retry logic detecting the collision.
254 *
255 * Concurrent readers can exist when we are called from the
256 * drm_vblank_off() or drm_vblank_on() functions and other non-vblank-
257 * irq callers. However, all those calls to us are happening with the
258 * vbl_lock locked to prevent drm_vblank_get(), so the vblank refcount
259 * can't increase while we are executing. Therefore a zero refcount at
260 * this point is safe for arbitrary counter bumps if we are called
261 * outside vblank irq, a non-zero count is not 100% safe. Unfortunately
262 * we must also accept a refcount of 1, as whenever we are called from
263 * drm_vblank_get() -> drm_vblank_enable() the refcount will be 1 and
264 * we must let that one pass through in order to not lose vblank counts
265 * during vblank irq off - which would completely defeat the whole
266 * point of this routine.
267 *
268 * Whenever we are called from vblank irq, we have to assume concurrent
269 * readers exist or can show up any time during our execution, even if
270 * the refcount is currently zero, as vblank irqs are usually only
271 * enabled due to the presence of readers, and because when we are called
272 * from vblank irq we can't hold the vbl_lock to protect us from sudden
273 * bumps in vblank refcount. Therefore also restrict bumps to +1 when
274 * called from vblank irq.
275 */
276 if ((diff > 1) && (atomic_read(&vblank->refcount) > 1 ||
277 (flags & DRM_CALLED_FROM_VBLIRQ))) {
278 DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u "
279 "refcount %u, vblirq %u\n", pipe, diff,
280 atomic_read(&vblank->refcount),
281 (flags & DRM_CALLED_FROM_VBLIRQ) != 0);
282 diff = 1;
283 }
284
285 DRM_DEBUG_VBL("updating vblank count on crtc %u:" 216 DRM_DEBUG_VBL("updating vblank count on crtc %u:"
286 " current=%u, diff=%u, hw=%u hw_last=%u\n", 217 " current=%u, diff=%u, hw=%u hw_last=%u\n",
287 pipe, vblank->count, diff, cur_vblank, vblank->last); 218 pipe, vblank->count, diff, cur_vblank, vblank->last);
@@ -303,6 +234,37 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
303 store_vblank(dev, pipe, diff, &t_vblank, cur_vblank); 234 store_vblank(dev, pipe, diff, &t_vblank, cur_vblank);
304} 235}
305 236
237/**
238 * drm_accurate_vblank_count - retrieve the master vblank counter
239 * @crtc: which counter to retrieve
240 *
 241 * This function is similar to drm_crtc_vblank_count() but it
 242 * interpolates to handle a race with vblank interrupts.
243 *
244 * This is mostly useful for hardware that can obtain the scanout
245 * position, but doesn't have a frame counter.
246 */
247u32 drm_accurate_vblank_count(struct drm_crtc *crtc)
248{
249 struct drm_device *dev = crtc->dev;
250 unsigned int pipe = drm_crtc_index(crtc);
251 u32 vblank;
252 unsigned long flags;
253
254 WARN(!dev->driver->get_vblank_timestamp,
255 "This function requires support for accurate vblank timestamps.");
256
257 spin_lock_irqsave(&dev->vblank_time_lock, flags);
258
259 drm_update_vblank_count(dev, pipe, 0);
260 vblank = drm_vblank_count(dev, pipe);
261
262 spin_unlock_irqrestore(&dev->vblank_time_lock, flags);
263
264 return vblank;
265}
266EXPORT_SYMBOL(drm_accurate_vblank_count);
267
306/* 268/*
307 * Disable vblank irq's on crtc, make sure that last vblank count 269 * Disable vblank irq's on crtc, make sure that last vblank count
308 * of hardware and corresponding consistent software vblank counter 270 * of hardware and corresponding consistent software vblank counter
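A hedged example of where the new helper fits — a flip-completion interrupt on hardware that can report the scanout position but has no frame counter (function name hypothetical):

	static void example_flip_done_irq(struct drm_crtc *crtc,
					  struct drm_pending_vblank_event *event)
	{
		struct drm_device *dev = crtc->dev;
		u32 seq = drm_accurate_vblank_count(crtc);
		unsigned long flags;

		spin_lock_irqsave(&dev->event_lock, flags);
		event->event.sequence = seq;
		/* ... timestamp and deliver the event ... */
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}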
@@ -417,6 +379,7 @@ int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
417 init_waitqueue_head(&vblank->queue); 379 init_waitqueue_head(&vblank->queue);
418 setup_timer(&vblank->disable_timer, vblank_disable_fn, 380 setup_timer(&vblank->disable_timer, vblank_disable_fn,
419 (unsigned long)vblank); 381 (unsigned long)vblank);
382 seqlock_init(&vblank->seqlock);
420 } 383 }
421 384
422 DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n"); 385 DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
@@ -986,25 +949,19 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
986 struct timeval *vblanktime) 949 struct timeval *vblanktime)
987{ 950{
988 struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 951 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
989 int count = DRM_TIMESTAMP_MAXRETRIES; 952 u32 vblank_count;
990 u32 cur_vblank; 953 unsigned int seq;
991 954
992 if (WARN_ON(pipe >= dev->num_crtcs)) 955 if (WARN_ON(pipe >= dev->num_crtcs))
993 return 0; 956 return 0;
994 957
995 /*
996 * Vblank timestamps are read lockless. To ensure consistency the vblank
997 * counter is rechecked and ordering is ensured using memory barriers.
998 * This works like a seqlock. The write-side barriers are in store_vblank.
999 */
1000 do { 958 do {
1001 cur_vblank = vblank->count; 959 seq = read_seqbegin(&vblank->seqlock);
1002 smp_rmb(); 960 vblank_count = vblank->count;
1003 *vblanktime = vblanktimestamp(dev, pipe, cur_vblank); 961 *vblanktime = vblank->time;
1004 smp_rmb(); 962 } while (read_seqretry(&vblank->seqlock, seq));
1005 } while (cur_vblank != vblank->count && --count > 0);
1006 963
1007 return cur_vblank; 964 return vblank_count;
1008} 965}
1009EXPORT_SYMBOL(drm_vblank_count_and_time); 966EXPORT_SYMBOL(drm_vblank_count_and_time);
1010 967
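The conversion replaces the hand-rolled smp_wmb()/smp_rmb() scheme with a real seqlock; the write and read sides now pair up as in this minimal sketch, and the old bounded-retry loop becomes unnecessary:

	/* Writer, from store_vblank() with vblank_time_lock held: */
	write_seqlock(&vblank->seqlock);
	vblank->time = *t_vblank;
	vblank->count += vblank_count_inc;
	write_sequnlock(&vblank->seqlock);

	/* Reader, from drm_vblank_count_and_time(), lockless: */
	do {
		seq = read_seqbegin(&vblank->seqlock);
		vblank_count = vblank->count;
		*vblanktime = vblank->time;
	} while (read_seqretry(&vblank->seqlock, seq));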
@@ -1037,39 +994,11 @@ static void send_vblank_event(struct drm_device *dev,
1037 e->event.tv_sec = now->tv_sec; 994 e->event.tv_sec = now->tv_sec;
1038 e->event.tv_usec = now->tv_usec; 995 e->event.tv_usec = now->tv_usec;
1039 996
1040 drm_send_event_locked(dev, &e->base);
1041
1042 trace_drm_vblank_event_delivered(e->base.pid, e->pipe, 997 trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
1043 e->event.sequence); 998 e->event.sequence);
1044}
1045 999
1046/** 1000 drm_send_event_locked(dev, &e->base);
1047 * drm_arm_vblank_event - arm vblank event after pageflip
1048 * @dev: DRM device
1049 * @pipe: CRTC index
1050 * @e: the event to prepare to send
1051 *
1052 * A lot of drivers need to generate vblank events for the very next vblank
1053 * interrupt. For example when the page flip interrupt happens when the page
1054 * flip gets armed, but not when it actually executes within the next vblank
1055 * period. This helper function implements exactly the required vblank arming
1056 * behaviour.
1057 *
1058 * Caller must hold event lock. Caller must also hold a vblank reference for
1059 * the event @e, which will be dropped when the next vblank arrives.
1060 *
1061 * This is the legacy version of drm_crtc_arm_vblank_event().
1062 */
1063void drm_arm_vblank_event(struct drm_device *dev, unsigned int pipe,
1064 struct drm_pending_vblank_event *e)
1065{
1066 assert_spin_locked(&dev->event_lock);
1067
1068 e->pipe = pipe;
1069 e->event.sequence = drm_vblank_count(dev, pipe);
1070 list_add_tail(&e->base.link, &dev->vblank_event_list);
1071} 1001}
1072EXPORT_SYMBOL(drm_arm_vblank_event);
1073 1002
1074/** 1003/**
1075 * drm_crtc_arm_vblank_event - arm vblank event after pageflip 1004 * drm_crtc_arm_vblank_event - arm vblank event after pageflip
@@ -1084,32 +1013,35 @@ EXPORT_SYMBOL(drm_arm_vblank_event);
1084 * 1013 *
1085 * Caller must hold event lock. Caller must also hold a vblank reference for 1014 * Caller must hold event lock. Caller must also hold a vblank reference for
1086 * the event @e, which will be dropped when the next vblank arrives. 1015 * the event @e, which will be dropped when the next vblank arrives.
1087 *
1088 * This is the native KMS version of drm_arm_vblank_event().
1089 */ 1016 */
1090void drm_crtc_arm_vblank_event(struct drm_crtc *crtc, 1017void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
1091 struct drm_pending_vblank_event *e) 1018 struct drm_pending_vblank_event *e)
1092{ 1019{
1093 drm_arm_vblank_event(crtc->dev, drm_crtc_index(crtc), e); 1020 struct drm_device *dev = crtc->dev;
1021 unsigned int pipe = drm_crtc_index(crtc);
1022
1023 assert_spin_locked(&dev->event_lock);
1024
1025 e->pipe = pipe;
1026 e->event.sequence = drm_vblank_count(dev, pipe);
1027 list_add_tail(&e->base.link, &dev->vblank_event_list);
1094} 1028}
1095EXPORT_SYMBOL(drm_crtc_arm_vblank_event); 1029EXPORT_SYMBOL(drm_crtc_arm_vblank_event);
1096 1030
1097/** 1031/**
1098 * drm_send_vblank_event - helper to send vblank event after pageflip 1032 * drm_crtc_send_vblank_event - helper to send vblank event after pageflip
1099 * @dev: DRM device 1033 * @crtc: the source CRTC of the vblank event
1100 * @pipe: CRTC index
1101 * @e: the event to send 1034 * @e: the event to send
1102 * 1035 *
1103 * Updates sequence # and timestamp on event, and sends it to userspace. 1036 * Updates sequence # and timestamp on event, and sends it to userspace.
1104 * Caller must hold event lock. 1037 * Caller must hold event lock.
1105 *
1106 * This is the legacy version of drm_crtc_send_vblank_event().
1107 */ 1038 */
1108void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe, 1039void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
1109 struct drm_pending_vblank_event *e) 1040 struct drm_pending_vblank_event *e)
1110{ 1041{
1042 struct drm_device *dev = crtc->dev;
1043 unsigned int seq, pipe = drm_crtc_index(crtc);
1111 struct timeval now; 1044 struct timeval now;
1112 unsigned int seq;
1113 1045
1114 if (dev->num_crtcs > 0) { 1046 if (dev->num_crtcs > 0) {
1115 seq = drm_vblank_count_and_time(dev, pipe, &now); 1047 seq = drm_vblank_count_and_time(dev, pipe, &now);
@@ -1121,23 +1053,6 @@ void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe,
1121 e->pipe = pipe; 1053 e->pipe = pipe;
1122 send_vblank_event(dev, e, seq, &now); 1054 send_vblank_event(dev, e, seq, &now);
1123} 1055}
1124EXPORT_SYMBOL(drm_send_vblank_event);
1125
1126/**
1127 * drm_crtc_send_vblank_event - helper to send vblank event after pageflip
1128 * @crtc: the source CRTC of the vblank event
1129 * @e: the event to send
1130 *
1131 * Updates sequence # and timestamp on event, and sends it to userspace.
1132 * Caller must hold event lock.
1133 *
1134 * This is the native KMS version of drm_send_vblank_event().
1135 */
1136void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
1137 struct drm_pending_vblank_event *e)
1138{
1139 drm_send_vblank_event(crtc->dev, drm_crtc_index(crtc), e);
1140}
1141EXPORT_SYMBOL(drm_crtc_send_vblank_event); 1056EXPORT_SYMBOL(drm_crtc_send_vblank_event);
1142 1057
1143/** 1058/**
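With the legacy wrappers folded away, drivers choose between the two remaining KMS helpers based on when the event should fire; a hedged sketch (helper name hypothetical):

	static void example_complete_flip(struct drm_crtc *crtc,
					  struct drm_pending_vblank_event *event,
					  bool fire_on_next_vblank)
	{
		struct drm_device *dev = crtc->dev;
		unsigned long flags;

		spin_lock_irqsave(&dev->event_lock, flags);
		if (fire_on_next_vblank)
			/* Queue for the next vblank; the caller holds a
			 * vblank reference that the vblank irq will drop. */
			drm_crtc_arm_vblank_event(crtc, event);
		else
			/* Already done: stamp with the current sequence and
			 * timestamp, deliver immediately. */
			drm_crtc_send_vblank_event(crtc, event);
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}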
@@ -1193,7 +1108,7 @@ static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe)
1193 * Returns: 1108 * Returns:
1194 * Zero on success or a negative error code on failure. 1109 * Zero on success or a negative error code on failure.
1195 */ 1110 */
1196int drm_vblank_get(struct drm_device *dev, unsigned int pipe) 1111static int drm_vblank_get(struct drm_device *dev, unsigned int pipe)
1197{ 1112{
1198 struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 1113 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
1199 unsigned long irqflags; 1114 unsigned long irqflags;
@@ -1219,7 +1134,6 @@ int drm_vblank_get(struct drm_device *dev, unsigned int pipe)
1219 1134
1220 return ret; 1135 return ret;
1221} 1136}
1222EXPORT_SYMBOL(drm_vblank_get);
1223 1137
1224/** 1138/**
1225 * drm_crtc_vblank_get - get a reference count on vblank events 1139 * drm_crtc_vblank_get - get a reference count on vblank events
@@ -1228,8 +1142,6 @@ EXPORT_SYMBOL(drm_vblank_get);
1228 * Acquire a reference count on vblank events to avoid having them disabled 1142 * Acquire a reference count on vblank events to avoid having them disabled
1229 * while in use. 1143 * while in use.
1230 * 1144 *
1231 * This is the native kms version of drm_vblank_get().
1232 *
1233 * Returns: 1145 * Returns:
1234 * Zero on success or a negative error code on failure. 1146 * Zero on success or a negative error code on failure.
1235 */ 1147 */
@@ -1249,7 +1161,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_get);
1249 * 1161 *
1250 * This is the legacy version of drm_crtc_vblank_put(). 1162 * This is the legacy version of drm_crtc_vblank_put().
1251 */ 1163 */
1252void drm_vblank_put(struct drm_device *dev, unsigned int pipe) 1164static void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
1253{ 1165{
1254 struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 1166 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
1255 1167
@@ -1270,7 +1182,6 @@ void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
1270 jiffies + ((drm_vblank_offdelay * HZ)/1000)); 1182 jiffies + ((drm_vblank_offdelay * HZ)/1000));
1271 } 1183 }
1272} 1184}
1273EXPORT_SYMBOL(drm_vblank_put);
1274 1185
1275/** 1186/**
1276 * drm_crtc_vblank_put - give up ownership of vblank events 1187 * drm_crtc_vblank_put - give up ownership of vblank events
@@ -1278,8 +1189,6 @@ EXPORT_SYMBOL(drm_vblank_put);
1278 * 1189 *
1279 * Release ownership of a given vblank counter, turning off interrupts 1190 * Release ownership of a given vblank counter, turning off interrupts
1280 * if possible. Disable interrupts after drm_vblank_offdelay milliseconds. 1191 * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
1281 *
1282 * This is the native kms version of drm_vblank_put().
1283 */ 1192 */
1284void drm_crtc_vblank_put(struct drm_crtc *crtc) 1193void drm_crtc_vblank_put(struct drm_crtc *crtc)
1285{ 1194{
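Now that drm_vblank_get()/drm_vblank_put() are static, drivers hold vblank references exclusively through the CRTC-based variants; a hedged sketch:

	static int example_sample_vblank(struct drm_crtc *crtc)
	{
		int ret;

		/* Keep the vblank irq enabled while the counter is needed. */
		ret = drm_crtc_vblank_get(crtc);
		if (ret)
			return ret; /* vblank not initialized for this crtc */

		/* ... wait for or sample the vblank counter ... */

		drm_crtc_vblank_put(crtc); /* irq may stop after offdelay */
		return 0;
	}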
diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h
index d3b6ee357a2b..c6f422e879dd 100644
--- a/drivers/gpu/drm/drm_legacy.h
+++ b/drivers/gpu/drm/drm_legacy.h
@@ -88,14 +88,10 @@ struct drm_agp_mem {
88 struct list_head head; 88 struct list_head head;
89}; 89};
90 90
91/* 91/* drm_lock.c */
92 * Generic Userspace Locking-API
93 */
94
95int drm_legacy_i_have_hw_lock(struct drm_device *d, struct drm_file *f);
96int drm_legacy_lock(struct drm_device *d, void *v, struct drm_file *f); 92int drm_legacy_lock(struct drm_device *d, void *v, struct drm_file *f);
97int drm_legacy_unlock(struct drm_device *d, void *v, struct drm_file *f); 93int drm_legacy_unlock(struct drm_device *d, void *v, struct drm_file *f);
98int drm_legacy_lock_free(struct drm_lock_data *lock, unsigned int ctx); 94void drm_legacy_lock_release(struct drm_device *dev, struct file *filp);
99 95
100/* DMA support */ 96/* DMA support */
101int drm_legacy_dma_setup(struct drm_device *dev); 97int drm_legacy_dma_setup(struct drm_device *dev);
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index daa2ff12101b..48ac0ebbd663 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -41,6 +41,110 @@
41static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context); 41static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
42 42
43/** 43/**
44 * Take the heavyweight lock.
45 *
46 * \param lock lock pointer.
47 * \param context locking context.
48 * \return one if the lock is held, or zero otherwise.
49 *
50 * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
51 */
52static
53int drm_lock_take(struct drm_lock_data *lock_data,
54 unsigned int context)
55{
56 unsigned int old, new, prev;
57 volatile unsigned int *lock = &lock_data->hw_lock->lock;
58
59 spin_lock_bh(&lock_data->spinlock);
60 do {
61 old = *lock;
62 if (old & _DRM_LOCK_HELD)
63 new = old | _DRM_LOCK_CONT;
64 else {
65 new = context | _DRM_LOCK_HELD |
66 ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
67 _DRM_LOCK_CONT : 0);
68 }
69 prev = cmpxchg(lock, old, new);
70 } while (prev != old);
71 spin_unlock_bh(&lock_data->spinlock);
72
73 if (_DRM_LOCKING_CONTEXT(old) == context) {
74 if (old & _DRM_LOCK_HELD) {
75 if (context != DRM_KERNEL_CONTEXT) {
76 DRM_ERROR("%d holds heavyweight lock\n",
77 context);
78 }
79 return 0;
80 }
81 }
82
83 if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
84 /* Have lock */
85 return 1;
86 }
87 return 0;
88}
89
90/**
91 * This takes a lock forcibly and hands it to context. Should ONLY be used
92 * inside *_unlock to give lock to kernel before calling *_dma_schedule.
93 *
94 * \param dev DRM device.
95 * \param lock lock pointer.
96 * \param context locking context.
97 * \return always one.
98 *
99 * Resets the lock file pointer.
100 * Marks the lock as held by the given context, via the \p cmpxchg instruction.
101 */
102static int drm_lock_transfer(struct drm_lock_data *lock_data,
103 unsigned int context)
104{
105 unsigned int old, new, prev;
106 volatile unsigned int *lock = &lock_data->hw_lock->lock;
107
108 lock_data->file_priv = NULL;
109 do {
110 old = *lock;
111 new = context | _DRM_LOCK_HELD;
112 prev = cmpxchg(lock, old, new);
113 } while (prev != old);
114 return 1;
115}
116
117static int drm_legacy_lock_free(struct drm_lock_data *lock_data,
118 unsigned int context)
119{
120 unsigned int old, new, prev;
121 volatile unsigned int *lock = &lock_data->hw_lock->lock;
122
123 spin_lock_bh(&lock_data->spinlock);
124 if (lock_data->kernel_waiters != 0) {
125 drm_lock_transfer(lock_data, 0);
126 lock_data->idle_has_lock = 1;
127 spin_unlock_bh(&lock_data->spinlock);
128 return 1;
129 }
130 spin_unlock_bh(&lock_data->spinlock);
131
132 do {
133 old = *lock;
134 new = _DRM_LOCKING_CONTEXT(old);
135 prev = cmpxchg(lock, old, new);
136 } while (prev != old);
137
138 if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
139 DRM_ERROR("%d freed heavyweight lock held by %d\n",
140 context, _DRM_LOCKING_CONTEXT(old));
141 return 1;
142 }
143 wake_up_interruptible(&lock_data->lock_queue);
144 return 0;
145}
146
147/**
44 * Lock ioctl. 148 * Lock ioctl.
45 * 149 *
46 * \param inode device inode. 150 * \param inode device inode.
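Stripped of the waiter bookkeeping and the same-context recursion check, the acquire step of the heavyweight lock reduces to a plain cmpxchg loop over a word shared with userspace — a sketch for orientation only:

	static int example_lock_take(struct drm_lock_data *lock_data,
				     unsigned int context)
	{
		volatile unsigned int *lock = &lock_data->hw_lock->lock;
		unsigned int old, new, prev;

		do {
			old = *lock;
			new = (old & _DRM_LOCK_HELD) ?
				(old | _DRM_LOCK_CONT) : /* mark contended */
				(context | _DRM_LOCK_HELD);
			prev = cmpxchg(lock, old, new);
		} while (prev != old); /* prev == old: swap was applied */

		return !(old & _DRM_LOCK_HELD); /* 1 if we took the lock */
	}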
@@ -115,7 +219,7 @@ int drm_legacy_lock(struct drm_device *dev, void *data,
115 /* don't set the block all signals on the master process for now 219 /* don't set the block all signals on the master process for now
116 * really probably not the correct answer but lets us debug xkb 220 * really probably not the correct answer but lets us debug xkb
117 * xserver for now */ 221 * xserver for now */
118 if (!file_priv->is_master) { 222 if (!drm_is_current_master(file_priv)) {
119 dev->sigdata.context = lock->context; 223 dev->sigdata.context = lock->context;
120 dev->sigdata.lock = master->lock.hw_lock; 224 dev->sigdata.lock = master->lock.hw_lock;
121 } 225 }
@@ -165,120 +269,6 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_
165} 269}
166 270
167/** 271/**
168 * Take the heavyweight lock.
169 *
170 * \param lock lock pointer.
171 * \param context locking context.
172 * \return one if the lock is held, or zero otherwise.
173 *
174 * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
175 */
176static
177int drm_lock_take(struct drm_lock_data *lock_data,
178 unsigned int context)
179{
180 unsigned int old, new, prev;
181 volatile unsigned int *lock = &lock_data->hw_lock->lock;
182
183 spin_lock_bh(&lock_data->spinlock);
184 do {
185 old = *lock;
186 if (old & _DRM_LOCK_HELD)
187 new = old | _DRM_LOCK_CONT;
188 else {
189 new = context | _DRM_LOCK_HELD |
190 ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
191 _DRM_LOCK_CONT : 0);
192 }
193 prev = cmpxchg(lock, old, new);
194 } while (prev != old);
195 spin_unlock_bh(&lock_data->spinlock);
196
197 if (_DRM_LOCKING_CONTEXT(old) == context) {
198 if (old & _DRM_LOCK_HELD) {
199 if (context != DRM_KERNEL_CONTEXT) {
200 DRM_ERROR("%d holds heavyweight lock\n",
201 context);
202 }
203 return 0;
204 }
205 }
206
207 if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
208 /* Have lock */
209 return 1;
210 }
211 return 0;
212}
213
214/**
215 * This takes a lock forcibly and hands it to context. Should ONLY be used
216 * inside *_unlock to give lock to kernel before calling *_dma_schedule.
217 *
218 * \param dev DRM device.
219 * \param lock lock pointer.
220 * \param context locking context.
221 * \return always one.
222 *
223 * Resets the lock file pointer.
224 * Marks the lock as held by the given context, via the \p cmpxchg instruction.
225 */
226static int drm_lock_transfer(struct drm_lock_data *lock_data,
227 unsigned int context)
228{
229 unsigned int old, new, prev;
230 volatile unsigned int *lock = &lock_data->hw_lock->lock;
231
232 lock_data->file_priv = NULL;
233 do {
234 old = *lock;
235 new = context | _DRM_LOCK_HELD;
236 prev = cmpxchg(lock, old, new);
237 } while (prev != old);
238 return 1;
239}
240
241/**
242 * Free lock.
243 *
244 * \param dev DRM device.
245 * \param lock lock.
246 * \param context context.
247 *
248 * Resets the lock file pointer.
249 * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
250 * waiting on the lock queue.
251 */
252int drm_legacy_lock_free(struct drm_lock_data *lock_data, unsigned int context)
253{
254 unsigned int old, new, prev;
255 volatile unsigned int *lock = &lock_data->hw_lock->lock;
256
257 spin_lock_bh(&lock_data->spinlock);
258 if (lock_data->kernel_waiters != 0) {
259 drm_lock_transfer(lock_data, 0);
260 lock_data->idle_has_lock = 1;
261 spin_unlock_bh(&lock_data->spinlock);
262 return 1;
263 }
264 spin_unlock_bh(&lock_data->spinlock);
265
266 do {
267 old = *lock;
268 new = _DRM_LOCKING_CONTEXT(old);
269 prev = cmpxchg(lock, old, new);
270 } while (prev != old);
271
272 if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
273 DRM_ERROR("%d freed heavyweight lock held by %d\n",
274 context, _DRM_LOCKING_CONTEXT(old));
275 return 1;
276 }
277 wake_up_interruptible(&lock_data->lock_queue);
278 return 0;
279}
280
281/**
282 * This function returns immediately and takes the hw lock 272 * This function returns immediately and takes the hw lock
283 * with the kernel context if it is free, otherwise it gets the highest priority when and if 273 * with the kernel context if it is free, otherwise it gets the highest priority when and if
284 * it is eventually released. 274 * it is eventually released.
@@ -330,11 +320,27 @@ void drm_legacy_idlelock_release(struct drm_lock_data *lock_data)
330} 320}
331EXPORT_SYMBOL(drm_legacy_idlelock_release); 321EXPORT_SYMBOL(drm_legacy_idlelock_release);
332 322
333int drm_legacy_i_have_hw_lock(struct drm_device *dev, 323static int drm_legacy_i_have_hw_lock(struct drm_device *dev,
334 struct drm_file *file_priv) 324 struct drm_file *file_priv)
335{ 325{
336 struct drm_master *master = file_priv->master; 326 struct drm_master *master = file_priv->master;
337 return (file_priv->lock_count && master->lock.hw_lock && 327 return (file_priv->lock_count && master->lock.hw_lock &&
338 _DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) && 328 _DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
339 master->lock.file_priv == file_priv); 329 master->lock.file_priv == file_priv);
340} 330}
331
332void drm_legacy_lock_release(struct drm_device *dev, struct file *filp)
333{
334 struct drm_file *file_priv = filp->private_data;
335
336 /* if the master has gone away we can't do anything with the lock */
337 if (!dev->master)
338 return;
339
340 if (drm_legacy_i_have_hw_lock(dev, file_priv)) {
341 DRM_DEBUG("File %p released, freeing lock for context %d\n",
342 filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
343 drm_legacy_lock_free(&file_priv->master->lock,
344 _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
345 }
346}
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index f5d80839a90c..49311fc61d5d 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -60,6 +60,21 @@ static int mipi_dsi_device_match(struct device *dev, struct device_driver *drv)
60 return 0; 60 return 0;
61} 61}
62 62
63static int mipi_dsi_uevent(struct device *dev, struct kobj_uevent_env *env)
64{
65 struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
66 int err;
67
68 err = of_device_uevent_modalias(dev, env);
69 if (err != -ENODEV)
70 return err;
71
72 add_uevent_var(env, "MODALIAS=%s%s", MIPI_DSI_MODULE_PREFIX,
73 dsi->name);
74
75 return 0;
76}
77
63static const struct dev_pm_ops mipi_dsi_device_pm_ops = { 78static const struct dev_pm_ops mipi_dsi_device_pm_ops = {
64 .runtime_suspend = pm_generic_runtime_suspend, 79 .runtime_suspend = pm_generic_runtime_suspend,
65 .runtime_resume = pm_generic_runtime_resume, 80 .runtime_resume = pm_generic_runtime_resume,
@@ -74,6 +89,7 @@ static const struct dev_pm_ops mipi_dsi_device_pm_ops = {
74static struct bus_type mipi_dsi_bus_type = { 89static struct bus_type mipi_dsi_bus_type = {
75 .name = "mipi-dsi", 90 .name = "mipi-dsi",
76 .match = mipi_dsi_device_match, 91 .match = mipi_dsi_device_match,
92 .uevent = mipi_dsi_uevent,
77 .pm = &mipi_dsi_device_pm_ops, 93 .pm = &mipi_dsi_device_pm_ops,
78}; 94};
79 95
@@ -983,6 +999,28 @@ int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
983EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_on); 999EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_on);
984 1000
985/** 1001/**
1002 * mipi_dsi_set_tear_scanline() - turn on the display module's Tearing Effect
 1003 * output signal on the TE signal line when the display module reaches line N
 1004 * as defined by STS[n:0].
1005 * @dsi: DSI peripheral device
1006 * @param: STS[10:0]
1007 * Return: 0 on success or a negative error code on failure
1008 */
1009int mipi_dsi_set_tear_scanline(struct mipi_dsi_device *dsi, u16 param)
1010{
1011 u8 payload[3] = { MIPI_DCS_SET_TEAR_SCANLINE, param >> 8,
1012 param & 0xff };
1013 ssize_t err;
1014
1015 err = mipi_dsi_generic_write(dsi, payload, sizeof(payload));
1016 if (err < 0)
1017 return err;
1018
1019 return 0;
1020}
1021EXPORT_SYMBOL(mipi_dsi_set_tear_scanline);
1022
1023/**
986 * mipi_dsi_dcs_set_pixel_format() - sets the pixel format for the RGB image 1024 * mipi_dsi_dcs_set_pixel_format() - sets the pixel format for the RGB image
987 * data used by the interface 1025 * data used by the interface
988 * @dsi: DSI peripheral device 1026 * @dsi: DSI peripheral device
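A hedged usage sketch for the new helper as a panel driver might call it, with an illustrative scanline value:

	static int example_panel_enable_te(struct mipi_dsi_device *dsi)
	{
		int err;

		/* TE pulses on vblank and hblank ... */
		err = mipi_dsi_dcs_set_tear_on(dsi,
					       MIPI_DSI_DCS_TEAR_MODE_VHBLANK);
		if (err < 0)
			return err;

		/* ... raised when scanout reaches line 0x300 (illustrative). */
		return mipi_dsi_set_tear_scanline(dsi, 0x300);
	}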
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 04de6fd88f8c..cb39f45d6a16 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -179,12 +179,14 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
179int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node) 179int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
180{ 180{
181 struct drm_mm_node *hole; 181 struct drm_mm_node *hole;
182 u64 end = node->start + node->size; 182 u64 end;
183 u64 hole_start; 183 u64 hole_start;
184 u64 hole_end; 184 u64 hole_end;
185 185
186 BUG_ON(node == NULL); 186 BUG_ON(node == NULL);
187 187
188 end = node->start + node->size;
189
188 /* Find the relevant hole to add our node to */ 190 /* Find the relevant hole to add our node to */
189 drm_mm_for_each_hole(hole, mm, hole_start, hole_end) { 191 drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
190 if (hole_start > node->start || hole_end < end) 192 if (hole_start > node->start || hole_end < end)
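For context, a hedged sketch of the caller side: the node's range must be filled in before the call, which is why reading node->start may only happen after the NULL check that the hunk above moves it behind:

	static int example_reserve_range(struct drm_mm *mm,
					 struct drm_mm_node *node,
					 u64 start, u64 size)
	{
		node->start = start;
		node->size = size;
		/* Fails with -ENOSPC if the range is already occupied. */
		return drm_mm_reserve_node(mm, node);
	}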
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 7def3d58da18..fc5040ae5f25 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -544,6 +544,7 @@ EXPORT_SYMBOL(drm_gtf_mode_complex);
544 * 544 *
545 * This function is to create the modeline based on the GTF algorithm. 545 * This function is to create the modeline based on the GTF algorithm.
546 * Generalized Timing Formula is derived from: 546 * Generalized Timing Formula is derived from:
547 *
547 * GTF Spreadsheet by Andy Morrish (1/5/97) 548 * GTF Spreadsheet by Andy Morrish (1/5/97)
548 * available at http://www.vesa.org 549 * available at http://www.vesa.org
549 * 550 *
@@ -552,7 +553,8 @@ EXPORT_SYMBOL(drm_gtf_mode_complex);
552 * I also refer to the function of fb_get_mode in the file of 553 * I also refer to the function of fb_get_mode in the file of
553 * drivers/video/fbmon.c 554 * drivers/video/fbmon.c
554 * 555 *
555 * Standard GTF parameters: 556 * Standard GTF parameters::
557 *
556 * M = 600 558 * M = 600
557 * C = 40 559 * C = 40
558 * K = 128 560 * K = 128
@@ -1518,6 +1520,8 @@ int drm_mode_convert_umode(struct drm_display_mode *out,
1518 if (out->status != MODE_OK) 1520 if (out->status != MODE_OK)
1519 goto out; 1521 goto out;
1520 1522
1523 drm_mode_set_crtcinfo(out, CRTC_INTERLACE_HALVE_V);
1524
1521 ret = 0; 1525 ret = 0;
1522 1526
1523out: 1527out:
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index e3a4adf03e7b..61146f5b4f56 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -30,14 +30,14 @@
30 * 30 *
31 * As KMS moves toward more fine grained locking, and atomic ioctl where 31 * As KMS moves toward more fine grained locking, and atomic ioctl where
32 * userspace can indirectly control locking order, it becomes necessary 32 * userspace can indirectly control locking order, it becomes necessary
33 * to use ww_mutex and acquire-contexts to avoid deadlocks. But because 33 * to use &ww_mutex and acquire-contexts to avoid deadlocks. But because
34 * the locking is more distributed around the driver code, we want a bit 34 * the locking is more distributed around the driver code, we want a bit
35 * of extra utility/tracking out of our acquire-ctx. This is provided 35 * of extra utility/tracking out of our acquire-ctx. This is provided
36 * by drm_modeset_lock / drm_modeset_acquire_ctx. 36 * by drm_modeset_lock / drm_modeset_acquire_ctx.
37 * 37 *
38 * For basic principles of ww_mutex, see: Documentation/locking/ww-mutex-design.txt 38 * For basic principles of &ww_mutex, see: Documentation/locking/ww-mutex-design.txt
39 * 39 *
40 * The basic usage pattern is to: 40 * The basic usage pattern is to::
41 * 41 *
42 * drm_modeset_acquire_init(&ctx) 42 * drm_modeset_acquire_init(&ctx)
43 * retry: 43 * retry:
@@ -51,6 +51,13 @@
51 * ... do stuff ... 51 * ... do stuff ...
52 * drm_modeset_drop_locks(&ctx); 52 * drm_modeset_drop_locks(&ctx);
53 * drm_modeset_acquire_fini(&ctx); 53 * drm_modeset_acquire_fini(&ctx);
54 *
 55 * On top of these per-object locks using &ww_mutex there's also an overall
56 * dev->mode_config.lock, for protecting everything else. Mostly this means
57 * probe state of connectors, and preventing hotplug add/removal of connectors.
58 *
59 * Finally there's a bunch of dedicated locks to protect drm core internal
60 * lists and lookup data structures.
54 */ 61 */
55 62
56/** 63/**
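Filled out with the actual API, the retry pattern above looks roughly like this sketch (one lock shown; real callers take several per context):

	static void example_modeset_locked(struct drm_crtc *crtc)
	{
		struct drm_modeset_acquire_ctx ctx;
		int ret;

		drm_modeset_acquire_init(&ctx, 0);
	retry:
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret == -EDEADLK) {
			drm_modeset_backoff(&ctx); /* drop all, then retry */
			goto retry;
		}

		/* ... do stuff ... */

		drm_modeset_drop_locks(&ctx);
		drm_modeset_acquire_fini(&ctx);
	}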
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 29d5a548d07a..b2f8f1062d5f 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -144,50 +144,6 @@ int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
144} 144}
145EXPORT_SYMBOL(drm_pci_set_busid); 145EXPORT_SYMBOL(drm_pci_set_busid);
146 146
147int drm_pci_set_unique(struct drm_device *dev,
148 struct drm_master *master,
149 struct drm_unique *u)
150{
151 int domain, bus, slot, func, ret;
152
153 master->unique_len = u->unique_len;
154 master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
155 if (!master->unique) {
156 ret = -ENOMEM;
157 goto err;
158 }
159
160 if (copy_from_user(master->unique, u->unique, master->unique_len)) {
161 ret = -EFAULT;
162 goto err;
163 }
164
165 master->unique[master->unique_len] = '\0';
166
167 /* Return error if the busid submitted doesn't match the device's actual
168 * busid.
169 */
170 ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
171 if (ret != 3) {
172 ret = -EINVAL;
173 goto err;
174 }
175
176 domain = bus >> 8;
177 bus &= 0xff;
178
179 if ((domain != drm_get_pci_domain(dev)) ||
180 (bus != dev->pdev->bus->number) ||
181 (slot != PCI_SLOT(dev->pdev->devfn)) ||
182 (func != PCI_FUNC(dev->pdev->devfn))) {
183 ret = -EINVAL;
184 goto err;
185 }
186 return 0;
187err:
188 return ret;
189}
190
191static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p) 147static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
192{ 148{
193 if ((p->busnum >> 8) != drm_get_pci_domain(dev) || 149 if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
@@ -444,13 +400,6 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
444{ 400{
445 return -EINVAL; 401 return -EINVAL;
446} 402}
447
448int drm_pci_set_unique(struct drm_device *dev,
449 struct drm_master *master,
450 struct drm_unique *u)
451{
452 return -EINVAL;
453}
454#endif 403#endif
455 404
456EXPORT_SYMBOL(drm_pci_init); 405EXPORT_SYMBOL(drm_pci_init);
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 369d2898ff9e..16c4a7bd7465 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -115,6 +115,7 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc,
115 * @src: source coordinates in 16.16 fixed point 115 * @src: source coordinates in 16.16 fixed point
116 * @dest: integer destination coordinates 116 * @dest: integer destination coordinates
117 * @clip: integer clipping coordinates 117 * @clip: integer clipping coordinates
118 * @rotation: plane rotation
118 * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point 119 * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
119 * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point 120 * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
120 * @can_position: is it legal to position the plane such that it 121 * @can_position: is it legal to position the plane such that it
@@ -134,16 +135,17 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc,
134 * Zero if update appears valid, error code on failure 135 * Zero if update appears valid, error code on failure
135 */ 136 */
136int drm_plane_helper_check_update(struct drm_plane *plane, 137int drm_plane_helper_check_update(struct drm_plane *plane,
137 struct drm_crtc *crtc, 138 struct drm_crtc *crtc,
138 struct drm_framebuffer *fb, 139 struct drm_framebuffer *fb,
139 struct drm_rect *src, 140 struct drm_rect *src,
140 struct drm_rect *dest, 141 struct drm_rect *dest,
141 const struct drm_rect *clip, 142 const struct drm_rect *clip,
142 int min_scale, 143 unsigned int rotation,
143 int max_scale, 144 int min_scale,
144 bool can_position, 145 int max_scale,
145 bool can_update_disabled, 146 bool can_position,
146 bool *visible) 147 bool can_update_disabled,
148 bool *visible)
147{ 149{
148 int hscale, vscale; 150 int hscale, vscale;
149 151
@@ -163,6 +165,8 @@ int drm_plane_helper_check_update(struct drm_plane *plane,
163 return -EINVAL; 165 return -EINVAL;
164 } 166 }
165 167
168 drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation);
169
166 /* Check scaling */ 170 /* Check scaling */
167 hscale = drm_rect_calc_hscale(src, dest, min_scale, max_scale); 171 hscale = drm_rect_calc_hscale(src, dest, min_scale, max_scale);
168 vscale = drm_rect_calc_vscale(src, dest, min_scale, max_scale); 172 vscale = drm_rect_calc_vscale(src, dest, min_scale, max_scale);
@@ -174,6 +178,9 @@ int drm_plane_helper_check_update(struct drm_plane *plane,
174 } 178 }
175 179
176 *visible = drm_rect_clip_scaled(src, dest, clip, hscale, vscale); 180 *visible = drm_rect_clip_scaled(src, dest, clip, hscale, vscale);
181
182 drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation);
183
177 if (!*visible) 184 if (!*visible)
178 /* 185 /*
179 * Plane isn't visible; some drivers can handle this 186 * Plane isn't visible; some drivers can handle this
@@ -219,10 +226,12 @@ EXPORT_SYMBOL(drm_plane_helper_check_update);
219 * 226 *
220 * Note that we make some assumptions about hardware limitations that may not be 227 * Note that we make some assumptions about hardware limitations that may not be
221 * true for all hardware -- 228 * true for all hardware --
222 * 1) Primary plane cannot be repositioned. 229 *
223 * 2) Primary plane cannot be scaled. 230 * 1. Primary plane cannot be repositioned.
224 * 3) Primary plane must cover the entire CRTC. 231 * 2. Primary plane cannot be scaled.
225 * 4) Subpixel positioning is not supported. 232 * 3. Primary plane must cover the entire CRTC.
233 * 4. Subpixel positioning is not supported.
234 *
226 * Drivers for hardware that don't have these restrictions can provide their 235 * Drivers for hardware that don't have these restrictions can provide their
227 * own implementation rather than using this helper. 236 * own implementation rather than using this helper.
228 * 237 *
@@ -265,6 +274,7 @@ int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
265 274
266 ret = drm_plane_helper_check_update(plane, crtc, fb, 275 ret = drm_plane_helper_check_update(plane, crtc, fb,
267 &src, &dest, &clip, 276 &src, &dest, &clip,
277 BIT(DRM_ROTATE_0),
268 DRM_PLANE_HELPER_NO_SCALING, 278 DRM_PLANE_HELPER_NO_SCALING,
269 DRM_PLANE_HELPER_NO_SCALING, 279 DRM_PLANE_HELPER_NO_SCALING,
270 false, false, &visible); 280 false, false, &visible);
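For drivers tracking this API change: drm_plane_helper_check_update() now takes the plane rotation between the clip rectangle and the scaling limits, so every caller must be updated. A minimal sketch of an adjusted call site, assuming a hypothetical foo driver (the rect derivation mirrors drm_simple_kms_helper.c further down in this series; nothing here is verbatim from the patch):

	static int foo_plane_atomic_check(struct drm_plane *plane,
					  struct drm_plane_state *state)
	{
		struct drm_rect src = {
			/* 16.16 fixed point, as the kernel-doc above notes */
			.x1 = state->src_x,
			.y1 = state->src_y,
			.x2 = state->src_x + state->src_w,
			.y2 = state->src_y + state->src_h,
		};
		struct drm_rect dest = {
			.x1 = state->crtc_x,
			.y1 = state->crtc_y,
			.x2 = state->crtc_x + state->crtc_w,
			.y2 = state->crtc_y + state->crtc_h,
		};
		struct drm_rect clip = { .x2 = 1920, .y2 = 1080 }; /* assumed mode size */
		bool visible;

		return drm_plane_helper_check_update(plane, state->crtc, state->fb,
						     &src, &dest, &clip,
						     state->rotation, /* new argument */
						     DRM_PLANE_HELPER_NO_SCALING,
						     DRM_PLANE_HELPER_NO_SCALING,
						     false, true, &visible);
	}

Call sites that never rotate pass BIT(DRM_ROTATE_0), exactly as drm_primary_helper_update() does above.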
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index 644169e1a029..2c819ef90090 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -68,24 +68,6 @@ err_free:
68 return ret; 68 return ret;
69} 69}
70 70
71int drm_platform_set_busid(struct drm_device *dev, struct drm_master *master)
72{
73 int id;
74
75 id = dev->platformdev->id;
76 if (id < 0)
77 id = 0;
78
79 master->unique = kasprintf(GFP_KERNEL, "platform:%s:%02d",
80 dev->platformdev->name, id);
81 if (!master->unique)
82 return -ENOMEM;
83
84 master->unique_len = strlen(master->unique);
85 return 0;
86}
87EXPORT_SYMBOL(drm_platform_set_busid);
88
89/** 71/**
90 * drm_platform_init - Register a platform device with the DRM subsystem 72 * drm_platform_init - Register a platform device with the DRM subsystem
91 * @driver: DRM device driver 73 * @driver: DRM device driver
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index aab0f3f1f42d..780589b420a4 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -593,7 +593,7 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
593 get_dma_buf(dma_buf); 593 get_dma_buf(dma_buf);
594 } 594 }
595 595
596 /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */ 596 /* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
597 ret = drm_gem_handle_create_tail(file_priv, obj, handle); 597 ret = drm_gem_handle_create_tail(file_priv, obj, handle);
598 drm_gem_object_unreference_unlocked(obj); 598 drm_gem_object_unreference_unlocked(obj);
599 if (ret) 599 if (ret)
@@ -601,11 +601,10 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
601 601
602 ret = drm_prime_add_buf_handle(&file_priv->prime, 602 ret = drm_prime_add_buf_handle(&file_priv->prime,
603 dma_buf, *handle); 603 dma_buf, *handle);
604 mutex_unlock(&file_priv->prime.lock);
604 if (ret) 605 if (ret)
605 goto fail; 606 goto fail;
606 607
607 mutex_unlock(&file_priv->prime.lock);
608
609 dma_buf_put(dma_buf); 608 dma_buf_put(dma_buf);
610 609
611 return 0; 610 return 0;
@@ -615,11 +614,14 @@ fail:
615 * to detach.. which seems ok.. 614 * to detach.. which seems ok..
616 */ 615 */
617 drm_gem_handle_delete(file_priv, *handle); 616 drm_gem_handle_delete(file_priv, *handle);
617 dma_buf_put(dma_buf);
618 return ret;
619
618out_unlock: 620out_unlock:
619 mutex_unlock(&dev->object_name_lock); 621 mutex_unlock(&dev->object_name_lock);
620out_put: 622out_put:
621 dma_buf_put(dma_buf);
622 mutex_unlock(&file_priv->prime.lock); 623 mutex_unlock(&file_priv->prime.lock);
624 dma_buf_put(dma_buf);
623 return ret; 625 return ret;
624} 626}
625EXPORT_SYMBOL(drm_gem_prime_fd_to_handle); 627EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
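The drm_prime reshuffle above is lock/reference discipline rather than a functional change: the prime mutex is now released before the dma-buf reference is dropped, on both the success and the failure paths. A generic sketch of the pattern, with hypothetical buf_table/table_insert names standing in for the drm internals:

	/*
	 * Drop the mutex that guards the lookup table before the final
	 * reference put: the put may free the object and take other
	 * locks, so it must not run under the table mutex. The error
	 * path drops the same reference the caller took earlier.
	 */
	static int add_buf_handle(struct buf_table *t, struct dma_buf *buf,
				  uint32_t *handle)
	{
		int ret;

		mutex_lock(&t->lock);
		ret = table_insert(t, buf, handle);	/* hypothetical */
		mutex_unlock(&t->lock);			/* unlock first... */

		dma_buf_put(buf);			/* ...then drop the ref */
		return ret;
	}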
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 0329080d7f7c..a0df377d7d1c 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -82,13 +82,30 @@ drm_mode_validate_flag(const struct drm_display_mode *mode,
82 82
83static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector) 83static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
84{ 84{
85 struct drm_cmdline_mode *cmdline_mode;
85 struct drm_display_mode *mode; 86 struct drm_display_mode *mode;
86 87
87 if (!connector->cmdline_mode.specified) 88 cmdline_mode = &connector->cmdline_mode;
89 if (!cmdline_mode->specified)
88 return 0; 90 return 0;
89 91
92 /* Only add a GTF mode if we find no matching probed modes */
93 list_for_each_entry(mode, &connector->probed_modes, head) {
94 if (mode->hdisplay != cmdline_mode->xres ||
95 mode->vdisplay != cmdline_mode->yres)
96 continue;
97
98 if (cmdline_mode->refresh_specified) {
 99 /* The probed mode's vrefresh isn't set until later, so compute it */
100 if (drm_mode_vrefresh(mode) != cmdline_mode->refresh)
101 continue;
102 }
103
104 return 0;
105 }
106
90 mode = drm_mode_create_from_cmdline_mode(connector->dev, 107 mode = drm_mode_create_from_cmdline_mode(connector->dev,
91 &connector->cmdline_mode); 108 cmdline_mode);
92 if (mode == NULL) 109 if (mode == NULL)
93 return 0; 110 return 0;
94 111
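One user-visible effect of the new loop: a mode forced with video= on the kernel command line no longer shadows an identical EDID-probed mode. For example, with the standard connector syntax (values illustrative):

	video=HDMI-A-1:1024x768@60

a GTF-computed 1024x768 fallback is now created only if no probed mode matches 1024x768 (at 60 Hz, since a refresh rate was specified); otherwise the probed mode is used as-is.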
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
new file mode 100644
index 000000000000..0db36d27e90b
--- /dev/null
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -0,0 +1,206 @@
1/*
2 * Copyright (C) 2016 Noralf Trønnes
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#include <drm/drmP.h>
11#include <drm/drm_atomic.h>
12#include <drm/drm_atomic_helper.h>
13#include <drm/drm_crtc_helper.h>
14#include <drm/drm_plane_helper.h>
15#include <drm/drm_simple_kms_helper.h>
16#include <linux/slab.h>
17
18/**
19 * DOC: overview
20 *
21 * This helper library provides helpers for drivers of simple display
22 * hardware.
23 *
24 * drm_simple_display_pipe_init() initializes a simple display pipeline
25 * which has only one full-screen scanout buffer feeding one output. The
26 * pipeline is represented by struct &drm_simple_display_pipe and binds
27 * together &drm_plane, &drm_crtc and &drm_encoder structures into one fixed
28 * entity. Some flexibility for code reuse is provided through a separately
29 * allocated &drm_connector object and supporting optional &drm_bridge
30 * encoder drivers.
31 */
32
33static const struct drm_encoder_funcs drm_simple_kms_encoder_funcs = {
34 .destroy = drm_encoder_cleanup,
35};
36
37static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc)
38{
39 struct drm_simple_display_pipe *pipe;
40
41 pipe = container_of(crtc, struct drm_simple_display_pipe, crtc);
42 if (!pipe->funcs || !pipe->funcs->enable)
43 return;
44
45 pipe->funcs->enable(pipe, crtc->state);
46}
47
48static void drm_simple_kms_crtc_disable(struct drm_crtc *crtc)
49{
50 struct drm_simple_display_pipe *pipe;
51
52 pipe = container_of(crtc, struct drm_simple_display_pipe, crtc);
53 if (!pipe->funcs || !pipe->funcs->disable)
54 return;
55
56 pipe->funcs->disable(pipe);
57}
58
59static const struct drm_crtc_helper_funcs drm_simple_kms_crtc_helper_funcs = {
60 .disable = drm_simple_kms_crtc_disable,
61 .enable = drm_simple_kms_crtc_enable,
62};
63
64static const struct drm_crtc_funcs drm_simple_kms_crtc_funcs = {
65 .reset = drm_atomic_helper_crtc_reset,
66 .destroy = drm_crtc_cleanup,
67 .set_config = drm_atomic_helper_set_config,
68 .page_flip = drm_atomic_helper_page_flip,
69 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
70 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
71};
72
73static int drm_simple_kms_plane_atomic_check(struct drm_plane *plane,
74 struct drm_plane_state *plane_state)
75{
76 struct drm_rect src = {
77 .x1 = plane_state->src_x,
78 .y1 = plane_state->src_y,
79 .x2 = plane_state->src_x + plane_state->src_w,
80 .y2 = plane_state->src_y + plane_state->src_h,
81 };
82 struct drm_rect dest = {
83 .x1 = plane_state->crtc_x,
84 .y1 = plane_state->crtc_y,
85 .x2 = plane_state->crtc_x + plane_state->crtc_w,
86 .y2 = plane_state->crtc_y + plane_state->crtc_h,
87 };
88 struct drm_rect clip = { 0 };
89 struct drm_simple_display_pipe *pipe;
90 struct drm_crtc_state *crtc_state;
91 bool visible;
92 int ret;
93
94 pipe = container_of(plane, struct drm_simple_display_pipe, plane);
95 crtc_state = drm_atomic_get_existing_crtc_state(plane_state->state,
96 &pipe->crtc);
97 if (crtc_state->enable != !!plane_state->crtc)
98 return -EINVAL; /* plane must match crtc enable state */
99
100 if (!crtc_state->enable)
101 return 0; /* nothing to check when disabling or disabled */
102
103 clip.x2 = crtc_state->adjusted_mode.hdisplay;
104 clip.y2 = crtc_state->adjusted_mode.vdisplay;
105 ret = drm_plane_helper_check_update(plane, &pipe->crtc,
106 plane_state->fb,
107 &src, &dest, &clip,
108 plane_state->rotation,
109 DRM_PLANE_HELPER_NO_SCALING,
110 DRM_PLANE_HELPER_NO_SCALING,
111 false, true, &visible);
112 if (ret)
113 return ret;
114
115 if (!visible)
116 return -EINVAL;
117
118 if (!pipe->funcs || !pipe->funcs->check)
119 return 0;
120
121 return pipe->funcs->check(pipe, plane_state, crtc_state);
122}
123
124static void drm_simple_kms_plane_atomic_update(struct drm_plane *plane,
125 struct drm_plane_state *pstate)
126{
127 struct drm_simple_display_pipe *pipe;
128
129 pipe = container_of(plane, struct drm_simple_display_pipe, plane);
130 if (!pipe->funcs || !pipe->funcs->update)
131 return;
132
133 pipe->funcs->update(pipe, pstate);
134}
135
136static const struct drm_plane_helper_funcs drm_simple_kms_plane_helper_funcs = {
137 .atomic_check = drm_simple_kms_plane_atomic_check,
138 .atomic_update = drm_simple_kms_plane_atomic_update,
139};
140
141static const struct drm_plane_funcs drm_simple_kms_plane_funcs = {
142 .update_plane = drm_atomic_helper_update_plane,
143 .disable_plane = drm_atomic_helper_disable_plane,
144 .destroy = drm_plane_cleanup,
145 .reset = drm_atomic_helper_plane_reset,
146 .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
147 .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
148};
149
150/**
151 * drm_simple_display_pipe_init - Initialize a simple display pipeline
152 * @dev: DRM device
153 * @pipe: simple display pipe object to initialize
154 * @funcs: callbacks for the display pipe (optional)
155 * @formats: array of supported formats (%DRM_FORMAT_*)
156 * @format_count: number of elements in @formats
157 * @connector: connector to attach and register
158 *
159 * Sets up a display pipeline which consists of a really simple
160 * plane-crtc-encoder pipe coupled with the provided connector.
161 * Teardown of a simple display pipe is all handled automatically by the drm
162 * core through calling drm_mode_config_cleanup(). Drivers afterwards need to
163 * release the memory for the structure themselves.
164 *
165 * Returns:
166 * Zero on success, negative error code on failure.
167 */
168int drm_simple_display_pipe_init(struct drm_device *dev,
169 struct drm_simple_display_pipe *pipe,
170 const struct drm_simple_display_pipe_funcs *funcs,
171 const uint32_t *formats, unsigned int format_count,
172 struct drm_connector *connector)
173{
174 struct drm_encoder *encoder = &pipe->encoder;
175 struct drm_plane *plane = &pipe->plane;
176 struct drm_crtc *crtc = &pipe->crtc;
177 int ret;
178
179 pipe->connector = connector;
180 pipe->funcs = funcs;
181
182 drm_plane_helper_add(plane, &drm_simple_kms_plane_helper_funcs);
183 ret = drm_universal_plane_init(dev, plane, 0,
184 &drm_simple_kms_plane_funcs,
185 formats, format_count,
186 DRM_PLANE_TYPE_PRIMARY, NULL);
187 if (ret)
188 return ret;
189
190 drm_crtc_helper_add(crtc, &drm_simple_kms_crtc_helper_funcs);
191 ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL,
192 &drm_simple_kms_crtc_funcs, NULL);
193 if (ret)
194 return ret;
195
196 encoder->possible_crtcs = 1 << drm_crtc_index(crtc);
197 ret = drm_encoder_init(dev, encoder, &drm_simple_kms_encoder_funcs,
198 DRM_MODE_ENCODER_NONE, NULL);
199 if (ret)
200 return ret;
201
202 return drm_mode_connector_attach_encoder(connector, encoder);
203}
204EXPORT_SYMBOL(drm_simple_display_pipe_init);
205
206MODULE_LICENSE("GPL");
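A hedged usage sketch for the new helper: the foo_ names are illustrative, but the funcs struct and the init call follow the API added above. The driver embeds the pipe, supplies optional callbacks, and hands over a connector it initialized itself:

	struct foo_device {
		struct drm_simple_display_pipe pipe;
		struct drm_connector connector;
	};

	static const uint32_t foo_formats[] = { DRM_FORMAT_XRGB8888 };

	static void foo_pipe_enable(struct drm_simple_display_pipe *pipe,
				    struct drm_crtc_state *crtc_state)
	{
		/* start scanout using pipe->plane.state->fb */
	}

	static void foo_pipe_disable(struct drm_simple_display_pipe *pipe)
	{
		/* stop scanout */
	}

	static const struct drm_simple_display_pipe_funcs foo_pipe_funcs = {
		.enable = foo_pipe_enable,
		.disable = foo_pipe_disable,
		/* .check and .update are optional, like .enable/.disable */
	};

	static int foo_pipe_init(struct drm_device *drm, struct foo_device *foo)
	{
		/* foo->connector must already be initialized */
		return drm_simple_display_pipe_init(drm, &foo->pipe,
						    &foo_pipe_funcs,
						    foo_formats,
						    ARRAY_SIZE(foo_formats),
						    &foo->connector);
	}

Per the kernel-doc above, teardown happens via drm_mode_config_cleanup(); the driver only frees its own foo_device allocation afterwards.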
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index fa7fadce8063..32dd821b7202 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -32,75 +32,6 @@ static struct device_type drm_sysfs_device_minor = {
32 32
33struct class *drm_class; 33struct class *drm_class;
34 34
35/**
36 * __drm_class_suspend - internal DRM class suspend routine
37 * @dev: Linux device to suspend
38 * @state: power state to enter
39 *
40 * Just figures out what the actual struct drm_device associated with
41 * @dev is and calls its suspend hook, if present.
42 */
43static int __drm_class_suspend(struct device *dev, pm_message_t state)
44{
45 if (dev->type == &drm_sysfs_device_minor) {
46 struct drm_minor *drm_minor = to_drm_minor(dev);
47 struct drm_device *drm_dev = drm_minor->dev;
48
49 if (drm_minor->type == DRM_MINOR_LEGACY &&
50 !drm_core_check_feature(drm_dev, DRIVER_MODESET) &&
51 drm_dev->driver->suspend)
52 return drm_dev->driver->suspend(drm_dev, state);
53 }
54 return 0;
55}
56
57/**
58 * drm_class_suspend - internal DRM class suspend hook. Simply calls
59 * __drm_class_suspend() with the correct pm state.
60 * @dev: Linux device to suspend
61 */
62static int drm_class_suspend(struct device *dev)
63{
64 return __drm_class_suspend(dev, PMSG_SUSPEND);
65}
66
67/**
68 * drm_class_freeze - internal DRM class freeze hook. Simply calls
69 * __drm_class_suspend() with the correct pm state.
70 * @dev: Linux device to freeze
71 */
72static int drm_class_freeze(struct device *dev)
73{
74 return __drm_class_suspend(dev, PMSG_FREEZE);
75}
76
77/**
78 * drm_class_resume - DRM class resume hook
79 * @dev: Linux device to resume
80 *
81 * Just figures out what the actual struct drm_device associated with
82 * @dev is and calls its resume hook, if present.
83 */
84static int drm_class_resume(struct device *dev)
85{
86 if (dev->type == &drm_sysfs_device_minor) {
87 struct drm_minor *drm_minor = to_drm_minor(dev);
88 struct drm_device *drm_dev = drm_minor->dev;
89
90 if (drm_minor->type == DRM_MINOR_LEGACY &&
91 !drm_core_check_feature(drm_dev, DRIVER_MODESET) &&
92 drm_dev->driver->resume)
93 return drm_dev->driver->resume(drm_dev);
94 }
95 return 0;
96}
97
98static const struct dev_pm_ops drm_class_dev_pm_ops = {
99 .suspend = drm_class_suspend,
100 .resume = drm_class_resume,
101 .freeze = drm_class_freeze,
102};
103
104static char *drm_devnode(struct device *dev, umode_t *mode) 35static char *drm_devnode(struct device *dev, umode_t *mode)
105{ 36{
106 return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev)); 37 return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
@@ -131,8 +62,6 @@ int drm_sysfs_init(void)
131 if (IS_ERR(drm_class)) 62 if (IS_ERR(drm_class))
132 return PTR_ERR(drm_class); 63 return PTR_ERR(drm_class);
133 64
134 drm_class->pm = &drm_class_dev_pm_ops;
135
136 err = class_create_file(drm_class, &class_attr_version.attr); 65 err = class_create_file(drm_class, &class_attr_version.attr);
137 if (err) { 66 if (err) {
138 class_destroy(drm_class); 67 class_destroy(drm_class);
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index ac9f4b3ec615..43ff44a2b8e7 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -670,57 +670,3 @@ void drm_legacy_vma_flush(struct drm_device *dev)
670 kfree(vma); 670 kfree(vma);
671 } 671 }
672} 672}
673
674int drm_vma_info(struct seq_file *m, void *data)
675{
676 struct drm_info_node *node = (struct drm_info_node *) m->private;
677 struct drm_device *dev = node->minor->dev;
678 struct drm_vma_entry *pt;
679 struct vm_area_struct *vma;
680 unsigned long vma_count = 0;
681#if defined(__i386__)
682 unsigned int pgprot;
683#endif
684
685 mutex_lock(&dev->struct_mutex);
686 list_for_each_entry(pt, &dev->vmalist, head)
687 vma_count++;
688
689 seq_printf(m, "vma use count: %lu, high_memory = %pK, 0x%pK\n",
690 vma_count, high_memory,
691 (void *)(unsigned long)virt_to_phys(high_memory));
692
693 list_for_each_entry(pt, &dev->vmalist, head) {
694 vma = pt->vma;
695 if (!vma)
696 continue;
697 seq_printf(m,
698 "\n%5d 0x%pK-0x%pK %c%c%c%c%c%c 0x%08lx000",
699 pt->pid,
700 (void *)vma->vm_start, (void *)vma->vm_end,
701 vma->vm_flags & VM_READ ? 'r' : '-',
702 vma->vm_flags & VM_WRITE ? 'w' : '-',
703 vma->vm_flags & VM_EXEC ? 'x' : '-',
704 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
705 vma->vm_flags & VM_LOCKED ? 'l' : '-',
706 vma->vm_flags & VM_IO ? 'i' : '-',
707 vma->vm_pgoff);
708
709#if defined(__i386__)
710 pgprot = pgprot_val(vma->vm_page_prot);
711 seq_printf(m, " %c%c%c%c%c%c%c%c%c",
712 pgprot & _PAGE_PRESENT ? 'p' : '-',
713 pgprot & _PAGE_RW ? 'w' : 'r',
714 pgprot & _PAGE_USER ? 'u' : 's',
715 pgprot & _PAGE_PWT ? 't' : 'b',
716 pgprot & _PAGE_PCD ? 'u' : 'c',
717 pgprot & _PAGE_ACCESSED ? 'a' : '-',
718 pgprot & _PAGE_DIRTY ? 'd' : '-',
719 pgprot & _PAGE_PSE ? 'm' : 'k',
720 pgprot & _PAGE_GLOBAL ? 'g' : 'l');
721#endif
722 seq_printf(m, "\n");
723 }
724 mutex_unlock(&dev->struct_mutex);
725 return 0;
726}
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
index 2f2ecde8285b..f306c8855978 100644
--- a/drivers/gpu/drm/drm_vma_manager.c
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -127,6 +127,9 @@ EXPORT_SYMBOL(drm_vma_offset_manager_destroy);
127 * used to implement weakly referenced lookups using kref_get_unless_zero(). 127 * used to implement weakly referenced lookups using kref_get_unless_zero().
128 * 128 *
129 * Example: 129 * Example:
130 *
131 * ::
132 *
130 * drm_vma_offset_lock_lookup(mgr); 133 * drm_vma_offset_lock_lookup(mgr);
131 * node = drm_vma_offset_lookup_locked(mgr); 134 * node = drm_vma_offset_lookup_locked(mgr);
132 * if (node) 135 * if (node)
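For readability, the complete lookup pattern that this literal block formats (the context lines above show its start; the remaining lines, with the comment's own sth/entr placeholders, are reproduced here from the surrounding file as best recalled and may differ in detail):

	drm_vma_offset_lock_lookup(mgr);
	node = drm_vma_offset_lookup_locked(mgr);
	if (node)
		kref_get_unless_zero(container_of(node, sth, entr));
	drm_vma_offset_unlock_lookup(mgr);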
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 3d4f56df8359..340d390306d8 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -496,7 +496,6 @@ static struct drm_driver etnaviv_drm_driver = {
496 DRIVER_RENDER, 496 DRIVER_RENDER,
497 .open = etnaviv_open, 497 .open = etnaviv_open,
498 .preclose = etnaviv_preclose, 498 .preclose = etnaviv_preclose,
499 .set_busid = drm_platform_set_busid,
500 .gem_free_object_unlocked = etnaviv_gem_free_object, 499 .gem_free_object_unlocked = etnaviv_gem_free_object,
501 .gem_vm_ops = &vm_ops, 500 .gem_vm_ops = &vm_ops,
502 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 501 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index f5321e2f25ff..a69cdd526bf8 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -125,7 +125,7 @@ struct etnaviv_gpu {
125 u32 completed_fence; 125 u32 completed_fence;
126 u32 retired_fence; 126 u32 retired_fence;
127 wait_queue_head_t fence_event; 127 wait_queue_head_t fence_event;
128 unsigned int fence_context; 128 u64 fence_context;
129 spinlock_t fence_spinlock; 129 spinlock_t fence_spinlock;
130 130
131 /* worker for handling active-list retiring: */ 131 /* worker for handling active-list retiring: */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
index 522cfd447892..16353ee81651 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
@@ -225,6 +225,7 @@ struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu)
225 225
226 etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING; 226 etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
227 etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops; 227 etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops;
228 etnaviv_domain->domain.pgsize_bitmap = SZ_4K;
228 etnaviv_domain->domain.geometry.aperture_start = GPU_MEM_START; 229 etnaviv_domain->domain.geometry.aperture_start = GPU_MEM_START;
229 etnaviv_domain->domain.geometry.aperture_end = GPU_MEM_START + PT_ENTRIES * SZ_4K - 1; 230 etnaviv_domain->domain.geometry.aperture_end = GPU_MEM_START + PT_ENTRIES * SZ_4K - 1;
230 231
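Context for the one-line addition: the IOMMU core moved the supported-page-size bitmap from struct iommu_ops into each struct iommu_domain this cycle, so the etnaviv domain now advertises its own sizes. The field is a bitmask of mappable page sizes:

	/* SZ_4K == BIT(12): 4 KiB is the only page size this GPU MMU maps */
	etnaviv_domain->domain.pgsize_bitmap = SZ_4K;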
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index f6223f907c15..7f9901b7777b 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -31,7 +31,6 @@
31#include "exynos_drm_plane.h" 31#include "exynos_drm_plane.h"
32#include "exynos_drm_drv.h" 32#include "exynos_drm_drv.h"
33#include "exynos_drm_fb.h" 33#include "exynos_drm_fb.h"
34#include "exynos_drm_fbdev.h"
35#include "exynos_drm_iommu.h" 34#include "exynos_drm_iommu.h"
36 35
37/* 36/*
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 468498e3fec1..4c1fb3f8b5a6 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -34,7 +34,7 @@
34 34
35struct exynos_dp_device { 35struct exynos_dp_device {
36 struct drm_encoder encoder; 36 struct drm_encoder encoder;
37 struct drm_connector connector; 37 struct drm_connector *connector;
38 struct drm_bridge *ptn_bridge; 38 struct drm_bridge *ptn_bridge;
39 struct drm_device *drm_dev; 39 struct drm_device *drm_dev;
40 struct device *dev; 40 struct device *dev;
@@ -70,7 +70,7 @@ static int exynos_dp_poweroff(struct analogix_dp_plat_data *plat_data)
70static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data) 70static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data)
71{ 71{
72 struct exynos_dp_device *dp = to_dp(plat_data); 72 struct exynos_dp_device *dp = to_dp(plat_data);
73 struct drm_connector *connector = &dp->connector; 73 struct drm_connector *connector = dp->connector;
74 struct drm_display_mode *mode; 74 struct drm_display_mode *mode;
75 int num_modes = 0; 75 int num_modes = 0;
76 76
@@ -103,6 +103,7 @@ static int exynos_dp_bridge_attach(struct analogix_dp_plat_data *plat_data,
103 int ret; 103 int ret;
104 104
105 drm_connector_register(connector); 105 drm_connector_register(connector);
106 dp->connector = connector;
106 107
107 /* Pre-empt DP connector creation if there's a bridge */ 108 /* Pre-empt DP connector creation if there's a bridge */
108 if (dp->ptn_bridge) { 109 if (dp->ptn_bridge) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index 011211e4167d..edbd98ff293e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -15,7 +15,6 @@
15#include <drm/drmP.h> 15#include <drm/drmP.h>
16#include "exynos_drm_drv.h" 16#include "exynos_drm_drv.h"
17#include "exynos_drm_crtc.h" 17#include "exynos_drm_crtc.h"
18#include "exynos_drm_fbdev.h"
19 18
20static LIST_HEAD(exynos_drm_subdrv_list); 19static LIST_HEAD(exynos_drm_subdrv_list);
21 20
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 5e38e749ac17..ad6b73c7fc59 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -93,17 +93,8 @@ static int exynos_dpi_get_modes(struct drm_connector *connector)
93 return 0; 93 return 0;
94} 94}
95 95
96static struct drm_encoder *
97exynos_dpi_best_encoder(struct drm_connector *connector)
98{
99 struct exynos_dpi *ctx = connector_to_dpi(connector);
100
101 return &ctx->encoder;
102}
103
104static const struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = { 96static const struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = {
105 .get_modes = exynos_dpi_get_modes, 97 .get_modes = exynos_dpi_get_modes,
106 .best_encoder = exynos_dpi_best_encoder,
107}; 98};
108 99
109static int exynos_dpi_create_connector(struct drm_encoder *encoder) 100static int exynos_dpi_create_connector(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 2dd820e23b0c..13d28d4229e2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -267,6 +267,8 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
267{ 267{
268 struct exynos_drm_private *priv = dev->dev_private; 268 struct exynos_drm_private *priv = dev->dev_private;
269 struct exynos_atomic_commit *commit; 269 struct exynos_atomic_commit *commit;
270 struct drm_crtc *crtc;
271 struct drm_crtc_state *crtc_state;
270 int i, ret; 272 int i, ret;
271 273
272 commit = kzalloc(sizeof(*commit), GFP_KERNEL); 274 commit = kzalloc(sizeof(*commit), GFP_KERNEL);
@@ -288,10 +290,8 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
288 /* Wait until all affected CRTCs have completed previous commits and 290 /* Wait until all affected CRTCs have completed previous commits and
289 * mark them as pending. 291 * mark them as pending.
290 */ 292 */
291 for (i = 0; i < dev->mode_config.num_crtc; ++i) { 293 for_each_crtc_in_state(state, crtc, crtc_state, i)
292 if (state->crtcs[i]) 294 commit->crtcs |= drm_crtc_mask(crtc);
293 commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]);
294 }
295 295
296 wait_event(priv->wait, !commit_is_pending(priv, commit->crtcs)); 296 wait_event(priv->wait, !commit_is_pending(priv, commit->crtcs));
297 297
@@ -299,7 +299,7 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
299 priv->pending |= commit->crtcs; 299 priv->pending |= commit->crtcs;
300 spin_unlock(&priv->lock); 300 spin_unlock(&priv->lock);
301 301
302 drm_atomic_helper_swap_state(dev, state); 302 drm_atomic_helper_swap_state(state, true);
303 303
304 if (nonblock) 304 if (nonblock)
305 schedule_work(&commit->work); 305 schedule_work(&commit->work);
@@ -407,7 +407,6 @@ static struct drm_driver exynos_drm_driver = {
407 .preclose = exynos_drm_preclose, 407 .preclose = exynos_drm_preclose,
408 .lastclose = exynos_drm_lastclose, 408 .lastclose = exynos_drm_lastclose,
409 .postclose = exynos_drm_postclose, 409 .postclose = exynos_drm_postclose,
410 .set_busid = drm_platform_set_busid,
411 .get_vblank_counter = drm_vblank_no_hw_counter, 410 .get_vblank_counter = drm_vblank_no_hw_counter,
412 .enable_vblank = exynos_drm_crtc_enable_vblank, 411 .enable_vblank = exynos_drm_crtc_enable_vblank,
413 .disable_vblank = exynos_drm_crtc_disable_vblank, 412 .disable_vblank = exynos_drm_crtc_disable_vblank,
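Two separate cleanups land in exynos_atomic_commit(): the pending-CRTC mask is built only from CRTCs actually carried in the incoming atomic state, and drm_atomic_helper_swap_state() now takes the state plus a stall flag instead of the device. The mask helper is pure shorthand:

	/* equivalent spellings of the same bit */
	commit->crtcs |= 1 << drm_crtc_index(crtc);	/* old open-coded form */
	commit->crtcs |= drm_crtc_mask(crtc);		/* new helper */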
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 601ecf8006a7..e07cb1fe4860 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1566,17 +1566,8 @@ static int exynos_dsi_get_modes(struct drm_connector *connector)
1566 return 0; 1566 return 0;
1567} 1567}
1568 1568
1569static struct drm_encoder *
1570exynos_dsi_best_encoder(struct drm_connector *connector)
1571{
1572 struct exynos_dsi *dsi = connector_to_dsi(connector);
1573
1574 return &dsi->encoder;
1575}
1576
1577static const struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = { 1569static const struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = {
1578 .get_modes = exynos_dsi_get_modes, 1570 .get_modes = exynos_dsi_get_modes,
1579 .best_encoder = exynos_dsi_best_encoder,
1580}; 1571};
1581 1572
1582static int exynos_dsi_create_connector(struct drm_encoder *encoder) 1573static int exynos_dsi_create_connector(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 3efe1aa89416..d47216488985 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -30,7 +30,6 @@
30 30
31#include "exynos_drm_drv.h" 31#include "exynos_drm_drv.h"
32#include "exynos_drm_fb.h" 32#include "exynos_drm_fb.h"
33#include "exynos_drm_fbdev.h"
34#include "exynos_drm_crtc.h" 33#include "exynos_drm_crtc.h"
35#include "exynos_drm_plane.h" 34#include "exynos_drm_plane.h"
36#include "exynos_drm_iommu.h" 35#include "exynos_drm_iommu.h"
@@ -120,7 +119,6 @@ static struct fimd_driver_data s3c64xx_fimd_driver_data = {
120 .timing_base = 0x0, 119 .timing_base = 0x0,
121 .has_clksel = 1, 120 .has_clksel = 1,
122 .has_limited_fmt = 1, 121 .has_limited_fmt = 1,
123 .has_hw_trigger = 1,
124}; 122};
125 123
126static struct fimd_driver_data exynos3_fimd_driver_data = { 124static struct fimd_driver_data exynos3_fimd_driver_data = {
@@ -171,14 +169,11 @@ static struct fimd_driver_data exynos5420_fimd_driver_data = {
171 .lcdblk_vt_shift = 24, 169 .lcdblk_vt_shift = 24,
172 .lcdblk_bypass_shift = 15, 170 .lcdblk_bypass_shift = 15,
173 .lcdblk_mic_bypass_shift = 11, 171 .lcdblk_mic_bypass_shift = 11,
174 .trg_type = I80_HW_TRG,
175 .has_shadowcon = 1, 172 .has_shadowcon = 1,
176 .has_vidoutcon = 1, 173 .has_vidoutcon = 1,
177 .has_vtsel = 1, 174 .has_vtsel = 1,
178 .has_mic_bypass = 1, 175 .has_mic_bypass = 1,
179 .has_dp_clk = 1, 176 .has_dp_clk = 1,
180 .has_hw_trigger = 1,
181 .has_trigger_per_te = 1,
182}; 177};
183 178
184struct fimd_context { 179struct fimd_context {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 493552368295..8564c3da0d22 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -48,13 +48,13 @@
48 48
49/* registers for base address */ 49/* registers for base address */
50#define G2D_SRC_BASE_ADDR 0x0304 50#define G2D_SRC_BASE_ADDR 0x0304
51#define G2D_SRC_STRIDE_REG 0x0308 51#define G2D_SRC_STRIDE 0x0308
52#define G2D_SRC_COLOR_MODE 0x030C 52#define G2D_SRC_COLOR_MODE 0x030C
53#define G2D_SRC_LEFT_TOP 0x0310 53#define G2D_SRC_LEFT_TOP 0x0310
54#define G2D_SRC_RIGHT_BOTTOM 0x0314 54#define G2D_SRC_RIGHT_BOTTOM 0x0314
55#define G2D_SRC_PLANE2_BASE_ADDR 0x0318 55#define G2D_SRC_PLANE2_BASE_ADDR 0x0318
56#define G2D_DST_BASE_ADDR 0x0404 56#define G2D_DST_BASE_ADDR 0x0404
57#define G2D_DST_STRIDE_REG 0x0408 57#define G2D_DST_STRIDE 0x0408
58#define G2D_DST_COLOR_MODE 0x040C 58#define G2D_DST_COLOR_MODE 0x040C
59#define G2D_DST_LEFT_TOP 0x0410 59#define G2D_DST_LEFT_TOP 0x0410
60#define G2D_DST_RIGHT_BOTTOM 0x0414 60#define G2D_DST_RIGHT_BOTTOM 0x0414
@@ -563,7 +563,7 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
563 563
564 switch (reg_offset) { 564 switch (reg_offset) {
565 case G2D_SRC_BASE_ADDR: 565 case G2D_SRC_BASE_ADDR:
566 case G2D_SRC_STRIDE_REG: 566 case G2D_SRC_STRIDE:
567 case G2D_SRC_COLOR_MODE: 567 case G2D_SRC_COLOR_MODE:
568 case G2D_SRC_LEFT_TOP: 568 case G2D_SRC_LEFT_TOP:
569 case G2D_SRC_RIGHT_BOTTOM: 569 case G2D_SRC_RIGHT_BOTTOM:
@@ -573,7 +573,7 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
573 reg_type = REG_TYPE_SRC_PLANE2; 573 reg_type = REG_TYPE_SRC_PLANE2;
574 break; 574 break;
575 case G2D_DST_BASE_ADDR: 575 case G2D_DST_BASE_ADDR:
576 case G2D_DST_STRIDE_REG: 576 case G2D_DST_STRIDE:
577 case G2D_DST_COLOR_MODE: 577 case G2D_DST_COLOR_MODE:
578 case G2D_DST_LEFT_TOP: 578 case G2D_DST_LEFT_TOP:
579 case G2D_DST_RIGHT_BOTTOM: 579 case G2D_DST_RIGHT_BOTTOM:
@@ -968,8 +968,8 @@ static int g2d_check_reg_offset(struct device *dev,
968 } else 968 } else
969 buf_info->types[reg_type] = BUF_TYPE_GEM; 969 buf_info->types[reg_type] = BUF_TYPE_GEM;
970 break; 970 break;
971 case G2D_SRC_STRIDE_REG: 971 case G2D_SRC_STRIDE:
972 case G2D_DST_STRIDE_REG: 972 case G2D_DST_STRIDE:
973 if (for_addr) 973 if (for_addr)
974 goto err; 974 goto err;
975 975
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 55f1d37c666a..77f12c00abf9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -242,7 +242,7 @@ exynos_drm_plane_check_size(const struct exynos_drm_plane_config *config,
242 state->v_ratio == (1 << 15)) 242 state->v_ratio == (1 << 15))
243 height_ok = true; 243 height_ok = true;
244 244
245 if (width_ok & height_ok) 245 if (width_ok && height_ok)
246 return 0; 246 return 0;
247 247
248 DRM_DEBUG_KMS("scaling mode is not supported"); 248 DRM_DEBUG_KMS("scaling mode is not supported");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 608b0afa337f..e8f6c92b2a36 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -378,16 +378,8 @@ static int vidi_get_modes(struct drm_connector *connector)
378 return drm_add_edid_modes(connector, edid); 378 return drm_add_edid_modes(connector, edid);
379} 379}
380 380
381static struct drm_encoder *vidi_best_encoder(struct drm_connector *connector)
382{
383 struct vidi_context *ctx = ctx_from_connector(connector);
384
385 return &ctx->encoder;
386}
387
388static const struct drm_connector_helper_funcs vidi_connector_helper_funcs = { 381static const struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
389 .get_modes = vidi_get_modes, 382 .get_modes = vidi_get_modes,
390 .best_encoder = vidi_best_encoder,
391}; 383};
392 384
393static int vidi_create_connector(struct drm_encoder *encoder) 385static int vidi_create_connector(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 58de5a430508..1625d7c8a319 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -937,17 +937,9 @@ static int hdmi_mode_valid(struct drm_connector *connector,
937 return MODE_OK; 937 return MODE_OK;
938} 938}
939 939
940static struct drm_encoder *hdmi_best_encoder(struct drm_connector *connector)
941{
942 struct hdmi_context *hdata = connector_to_hdmi(connector);
943
944 return &hdata->encoder;
945}
946
947static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = { 940static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
948 .get_modes = hdmi_get_modes, 941 .get_modes = hdmi_get_modes,
949 .mode_valid = hdmi_mode_valid, 942 .mode_valid = hdmi_mode_valid,
950 .best_encoder = hdmi_best_encoder,
951}; 943};
952 944
953static int hdmi_create_connector(struct drm_encoder *encoder) 945static int hdmi_create_connector(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
index 89c0084c2814..3371635cd4d7 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -22,20 +22,21 @@
22#include "fsl_dcu_drm_drv.h" 22#include "fsl_dcu_drm_drv.h"
23#include "fsl_dcu_drm_plane.h" 23#include "fsl_dcu_drm_plane.h"
24 24
25static void fsl_dcu_drm_crtc_atomic_begin(struct drm_crtc *crtc, 25static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc,
26 struct drm_crtc_state *old_crtc_state) 26 struct drm_crtc_state *old_crtc_state)
27{ 27{
28} 28 struct drm_pending_vblank_event *event = crtc->state->event;
29 29
30static int fsl_dcu_drm_crtc_atomic_check(struct drm_crtc *crtc, 30 if (event) {
31 struct drm_crtc_state *state) 31 crtc->state->event = NULL;
32{
33 return 0;
34}
35 32
36static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc, 33 spin_lock_irq(&crtc->dev->event_lock);
37 struct drm_crtc_state *old_crtc_state) 34 if (drm_crtc_vblank_get(crtc) == 0)
38{ 35 drm_crtc_arm_vblank_event(crtc, event);
36 else
37 drm_crtc_send_vblank_event(crtc, event);
38 spin_unlock_irq(&crtc->dev->event_lock);
39 }
39} 40}
40 41
41static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc) 42static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc)
@@ -43,6 +44,8 @@ static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc)
43 struct drm_device *dev = crtc->dev; 44 struct drm_device *dev = crtc->dev;
44 struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; 45 struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
45 46
47 drm_crtc_vblank_off(crtc);
48
46 regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE, 49 regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
47 DCU_MODE_DCU_MODE_MASK, 50 DCU_MODE_DCU_MODE_MASK,
48 DCU_MODE_DCU_MODE(DCU_MODE_OFF)); 51 DCU_MODE_DCU_MODE(DCU_MODE_OFF));
@@ -60,6 +63,8 @@ static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc)
60 DCU_MODE_DCU_MODE(DCU_MODE_NORMAL)); 63 DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
61 regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, 64 regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
62 DCU_UPDATE_MODE_READREG); 65 DCU_UPDATE_MODE_READREG);
66
67 drm_crtc_vblank_on(crtc);
63} 68}
64 69
65static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) 70static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
@@ -117,8 +122,6 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
117} 122}
118 123
119static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = { 124static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = {
120 .atomic_begin = fsl_dcu_drm_crtc_atomic_begin,
121 .atomic_check = fsl_dcu_drm_crtc_atomic_check,
122 .atomic_flush = fsl_dcu_drm_crtc_atomic_flush, 125 .atomic_flush = fsl_dcu_drm_crtc_atomic_flush,
123 .disable = fsl_dcu_drm_disable_crtc, 126 .disable = fsl_dcu_drm_disable_crtc,
124 .enable = fsl_dcu_drm_crtc_enable, 127 .enable = fsl_dcu_drm_crtc_enable,
@@ -138,9 +141,10 @@ int fsl_dcu_drm_crtc_create(struct fsl_dcu_drm_device *fsl_dev)
138{ 141{
139 struct drm_plane *primary; 142 struct drm_plane *primary;
140 struct drm_crtc *crtc = &fsl_dev->crtc; 143 struct drm_crtc *crtc = &fsl_dev->crtc;
141 unsigned int i, j, reg_num;
142 int ret; 144 int ret;
143 145
146 fsl_dcu_drm_init_planes(fsl_dev->drm);
147
144 primary = fsl_dcu_drm_primary_create_plane(fsl_dev->drm); 148 primary = fsl_dcu_drm_primary_create_plane(fsl_dev->drm);
145 if (!primary) 149 if (!primary)
146 return -ENOMEM; 150 return -ENOMEM;
@@ -154,19 +158,5 @@ int fsl_dcu_drm_crtc_create(struct fsl_dcu_drm_device *fsl_dev)
154 158
155 drm_crtc_helper_add(crtc, &fsl_dcu_drm_crtc_helper_funcs); 159 drm_crtc_helper_add(crtc, &fsl_dcu_drm_crtc_helper_funcs);
156 160
157 if (!strcmp(fsl_dev->soc->name, "ls1021a"))
158 reg_num = LS1021A_LAYER_REG_NUM;
159 else
160 reg_num = VF610_LAYER_REG_NUM;
161 for (i = 0; i < fsl_dev->soc->total_layer; i++) {
162 for (j = 1; j <= reg_num; j++)
163 regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(i, j), 0);
164 }
165 regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
166 DCU_MODE_DCU_MODE_MASK,
167 DCU_MODE_DCU_MODE(DCU_MODE_OFF));
168 regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
169 DCU_UPDATE_MODE_READREG);
170
171 return 0; 161 return 0;
172} 162}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 0ec1ad961e0d..7882387f9bff 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -11,6 +11,7 @@
11 11
12#include <linux/clk.h> 12#include <linux/clk.h>
13#include <linux/clk-provider.h> 13#include <linux/clk-provider.h>
14#include <linux/console.h>
14#include <linux/io.h> 15#include <linux/io.h>
15#include <linux/mfd/syscon.h> 16#include <linux/mfd/syscon.h>
16#include <linux/mm.h> 17#include <linux/mm.h>
@@ -22,6 +23,7 @@
22#include <linux/regmap.h> 23#include <linux/regmap.h>
23 24
24#include <drm/drmP.h> 25#include <drm/drmP.h>
26#include <drm/drm_atomic_helper.h>
25#include <drm/drm_crtc_helper.h> 27#include <drm/drm_crtc_helper.h>
26#include <drm/drm_fb_cma_helper.h> 28#include <drm/drm_fb_cma_helper.h>
27#include <drm/drm_gem_cma_helper.h> 29#include <drm/drm_gem_cma_helper.h>
@@ -42,7 +44,6 @@ static const struct regmap_config fsl_dcu_regmap_config = {
42 .reg_bits = 32, 44 .reg_bits = 32,
43 .reg_stride = 4, 45 .reg_stride = 4,
44 .val_bits = 32, 46 .val_bits = 32,
45 .cache_type = REGCACHE_RBTREE,
46 47
47 .volatile_reg = fsl_dcu_drm_is_volatile_reg, 48 .volatile_reg = fsl_dcu_drm_is_volatile_reg,
48}; 49};
@@ -198,7 +199,7 @@ static struct drm_driver fsl_dcu_drm_driver = {
198 .get_vblank_counter = drm_vblank_no_hw_counter, 199 .get_vblank_counter = drm_vblank_no_hw_counter,
199 .enable_vblank = fsl_dcu_drm_enable_vblank, 200 .enable_vblank = fsl_dcu_drm_enable_vblank,
200 .disable_vblank = fsl_dcu_drm_disable_vblank, 201 .disable_vblank = fsl_dcu_drm_disable_vblank,
201 .gem_free_object = drm_gem_cma_free_object, 202 .gem_free_object_unlocked = drm_gem_cma_free_object,
202 .gem_vm_ops = &drm_gem_cma_vm_ops, 203 .gem_vm_ops = &drm_gem_cma_vm_ops,
203 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 204 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
204 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 205 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
@@ -228,11 +229,26 @@ static int fsl_dcu_drm_pm_suspend(struct device *dev)
228 if (!fsl_dev) 229 if (!fsl_dev)
229 return 0; 230 return 0;
230 231
232 disable_irq(fsl_dev->irq);
231 drm_kms_helper_poll_disable(fsl_dev->drm); 233 drm_kms_helper_poll_disable(fsl_dev->drm);
232 regcache_cache_only(fsl_dev->regmap, true); 234
233 regcache_mark_dirty(fsl_dev->regmap); 235 console_lock();
234 clk_disable(fsl_dev->clk); 236 drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 1);
235 clk_unprepare(fsl_dev->clk); 237 console_unlock();
238
239 fsl_dev->state = drm_atomic_helper_suspend(fsl_dev->drm);
240 if (IS_ERR(fsl_dev->state)) {
241 console_lock();
242 drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 0);
243 console_unlock();
244
245 drm_kms_helper_poll_enable(fsl_dev->drm);
246 enable_irq(fsl_dev->irq);
247 return PTR_ERR(fsl_dev->state);
248 }
249
250 clk_disable_unprepare(fsl_dev->pix_clk);
251 clk_disable_unprepare(fsl_dev->clk);
236 252
237 return 0; 253 return 0;
238} 254}
@@ -245,21 +261,27 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
245 if (!fsl_dev) 261 if (!fsl_dev)
246 return 0; 262 return 0;
247 263
248 ret = clk_enable(fsl_dev->clk); 264 ret = clk_prepare_enable(fsl_dev->clk);
249 if (ret < 0) { 265 if (ret < 0) {
250 dev_err(dev, "failed to enable dcu clk\n"); 266 dev_err(dev, "failed to enable dcu clk\n");
251 clk_unprepare(fsl_dev->clk);
252 return ret; 267 return ret;
253 } 268 }
254 ret = clk_prepare(fsl_dev->clk); 269
270 ret = clk_prepare_enable(fsl_dev->pix_clk);
255 if (ret < 0) { 271 if (ret < 0) {
256 dev_err(dev, "failed to prepare dcu clk\n"); 272 dev_err(dev, "failed to enable pix clk\n");
257 return ret; 273 return ret;
258 } 274 }
259 275
276 fsl_dcu_drm_init_planes(fsl_dev->drm);
277 drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state);
278
279 console_lock();
280 drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 0);
281 console_unlock();
282
260 drm_kms_helper_poll_enable(fsl_dev->drm); 283 drm_kms_helper_poll_enable(fsl_dev->drm);
261 regcache_cache_only(fsl_dev->regmap, false); 284 enable_irq(fsl_dev->irq);
262 regcache_sync(fsl_dev->regmap);
263 285
264 return 0; 286 return 0;
265} 287}
@@ -273,12 +295,14 @@ static const struct fsl_dcu_soc_data fsl_dcu_ls1021a_data = {
273 .name = "ls1021a", 295 .name = "ls1021a",
274 .total_layer = 16, 296 .total_layer = 16,
275 .max_layer = 4, 297 .max_layer = 4,
298 .layer_regs = LS1021A_LAYER_REG_NUM,
276}; 299};
277 300
278static const struct fsl_dcu_soc_data fsl_dcu_vf610_data = { 301static const struct fsl_dcu_soc_data fsl_dcu_vf610_data = {
279 .name = "vf610", 302 .name = "vf610",
280 .total_layer = 64, 303 .total_layer = 64,
281 .max_layer = 6, 304 .max_layer = 6,
305 .layer_regs = VF610_LAYER_REG_NUM,
282}; 306};
283 307
284static const struct of_device_id fsl_dcu_of_match[] = { 308static const struct of_device_id fsl_dcu_of_match[] = {
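Stripped of the fbdev, IRQ and clock handling, the reworked PM hooks reduce to the generic atomic suspend/resume pattern; a sketch with the error unwinding elided:

	/* suspend: save and disable the current atomic state */
	state = drm_atomic_helper_suspend(drm);
	if (IS_ERR(state))
		return PTR_ERR(state);	/* the real code re-enables polling/IRQ first */

	/* resume: reprogram the static layer setup, then replay the state */
	fsl_dcu_drm_init_planes(drm);
	drm_atomic_helper_resume(drm, state);

This is also why the regmap cache is dropped above: on resume, registers are rewritten from the saved atomic state rather than restored from a cache.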
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
index c275f900ff23..3b371fe7491e 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
@@ -175,6 +175,7 @@ struct fsl_dcu_soc_data {
175 unsigned int total_layer; 175 unsigned int total_layer;
176 /*max layer number DCU supported*/ 176 /*max layer number DCU supported*/
177 unsigned int max_layer; 177 unsigned int max_layer;
178 unsigned int layer_regs;
178}; 179};
179 180
180struct fsl_dcu_drm_device { 181struct fsl_dcu_drm_device {
@@ -193,6 +194,7 @@ struct fsl_dcu_drm_device {
193 struct drm_encoder encoder; 194 struct drm_encoder encoder;
194 struct fsl_dcu_drm_connector connector; 195 struct fsl_dcu_drm_connector connector;
195 const struct fsl_dcu_soc_data *soc; 196 const struct fsl_dcu_soc_data *soc;
197 struct drm_atomic_state *state;
196}; 198};
197 199
198void fsl_dcu_fbdev_init(struct drm_device *dev); 200void fsl_dcu_fbdev_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
index c564ec612b59..a6e4cd591960 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
@@ -37,23 +37,22 @@ int fsl_dcu_drm_modeset_init(struct fsl_dcu_drm_device *fsl_dev)
37 37
38 ret = fsl_dcu_drm_crtc_create(fsl_dev); 38 ret = fsl_dcu_drm_crtc_create(fsl_dev);
39 if (ret) 39 if (ret)
40 return ret; 40 goto err;
41 41
42 ret = fsl_dcu_drm_encoder_create(fsl_dev, &fsl_dev->crtc); 42 ret = fsl_dcu_drm_encoder_create(fsl_dev, &fsl_dev->crtc);
43 if (ret) 43 if (ret)
44 goto fail_encoder; 44 goto err;
45 45
46 ret = fsl_dcu_drm_connector_create(fsl_dev, &fsl_dev->encoder); 46 ret = fsl_dcu_drm_connector_create(fsl_dev, &fsl_dev->encoder);
47 if (ret) 47 if (ret)
48 goto fail_connector; 48 goto err;
49 49
50 drm_mode_config_reset(fsl_dev->drm); 50 drm_mode_config_reset(fsl_dev->drm);
51 drm_kms_helper_poll_init(fsl_dev->drm); 51 drm_kms_helper_poll_init(fsl_dev->drm);
52 52
53 return 0; 53 return 0;
54fail_encoder: 54
55 fsl_dev->crtc.funcs->destroy(&fsl_dev->crtc); 55err:
56fail_connector: 56 drm_mode_config_cleanup(fsl_dev->drm);
57 fsl_dev->encoder.funcs->destroy(&fsl_dev->encoder);
58 return ret; 57 return ret;
59} 58}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index 274558b3b32b..e50467a0deb0 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -217,6 +217,22 @@ static const u32 fsl_dcu_drm_plane_formats[] = {
217 DRM_FORMAT_YUV422, 217 DRM_FORMAT_YUV422,
218}; 218};
219 219
220void fsl_dcu_drm_init_planes(struct drm_device *dev)
221{
222 struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
223 int i, j;
224
225 for (i = 0; i < fsl_dev->soc->total_layer; i++) {
226 for (j = 1; j <= fsl_dev->soc->layer_regs; j++)
227 regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(i, j), 0);
228 }
229 regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
230 DCU_MODE_DCU_MODE_MASK,
231 DCU_MODE_DCU_MODE(DCU_MODE_OFF));
232 regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
233 DCU_UPDATE_MODE_READREG);
234}
235
220struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev) 236struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev)
221{ 237{
222 struct drm_plane *primary; 238 struct drm_plane *primary;
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h
index d657f088d859..8ee45f813ee8 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h
@@ -12,6 +12,7 @@
12#ifndef __FSL_DCU_DRM_PLANE_H__ 12#ifndef __FSL_DCU_DRM_PLANE_H__
13#define __FSL_DCU_DRM_PLANE_H__ 13#define __FSL_DCU_DRM_PLANE_H__
14 14
15void fsl_dcu_drm_init_planes(struct drm_device *dev);
15struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev); 16struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev);
16 17
17#endif /* __FSL_DCU_DRM_PLANE_H__ */ 18#endif /* __FSL_DCU_DRM_PLANE_H__ */
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index 98c998da91eb..0b0989e503ea 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -102,14 +102,6 @@ static const struct drm_connector_funcs fsl_dcu_drm_connector_funcs = {
102 .reset = drm_atomic_helper_connector_reset, 102 .reset = drm_atomic_helper_connector_reset,
103}; 103};
104 104
105static struct drm_encoder *
106fsl_dcu_drm_connector_best_encoder(struct drm_connector *connector)
107{
108 struct fsl_dcu_drm_connector *fsl_con = to_fsl_dcu_connector(connector);
109
110 return fsl_con->encoder;
111}
112
113static int fsl_dcu_drm_connector_get_modes(struct drm_connector *connector) 105static int fsl_dcu_drm_connector_get_modes(struct drm_connector *connector)
114{ 106{
115 struct fsl_dcu_drm_connector *fsl_connector; 107 struct fsl_dcu_drm_connector *fsl_connector;
@@ -136,7 +128,6 @@ static int fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector,
136} 128}
137 129
138static const struct drm_connector_helper_funcs connector_helper_funcs = { 130static const struct drm_connector_helper_funcs connector_helper_funcs = {
139 .best_encoder = fsl_dcu_drm_connector_best_encoder,
140 .get_modes = fsl_dcu_drm_connector_get_modes, 131 .get_modes = fsl_dcu_drm_connector_get_modes,
141 .mode_valid = fsl_dcu_drm_connector_mode_valid, 132 .mode_valid = fsl_dcu_drm_connector_mode_valid,
142}; 133};
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index c95406e6f44d..1a1cf7a3b5ef 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -175,20 +175,21 @@ void gma_crtc_load_lut(struct drm_crtc *crtc)
175 } 175 }
176} 176}
177 177
178void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, 178int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
179 u32 start, u32 size) 179 u32 size)
180{ 180{
181 struct gma_crtc *gma_crtc = to_gma_crtc(crtc); 181 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
182 int i; 182 int i;
183 int end = (start + size > 256) ? 256 : start + size;
184 183
185 for (i = start; i < end; i++) { 184 for (i = 0; i < size; i++) {
186 gma_crtc->lut_r[i] = red[i] >> 8; 185 gma_crtc->lut_r[i] = red[i] >> 8;
187 gma_crtc->lut_g[i] = green[i] >> 8; 186 gma_crtc->lut_g[i] = green[i] >> 8;
188 gma_crtc->lut_b[i] = blue[i] >> 8; 187 gma_crtc->lut_b[i] = blue[i] >> 8;
189 } 188 }
190 189
191 gma_crtc_load_lut(crtc); 190 gma_crtc_load_lut(crtc);
191
192 return 0;
192} 193}
193 194
194/** 195/**
@@ -281,7 +282,7 @@ void gma_crtc_dpms(struct drm_crtc *crtc, int mode)
281 REG_WRITE(VGACNTRL, VGA_DISP_DISABLE); 282 REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
282 283
283 /* Turn off vblank interrupts */ 284 /* Turn off vblank interrupts */
284 drm_vblank_off(dev, pipe); 285 drm_crtc_vblank_off(crtc);
285 286
286 /* Wait for vblank for the disable to take effect */ 287 /* Wait for vblank for the disable to take effect */
287 gma_wait_for_vblank(dev); 288 gma_wait_for_vblank(dev);
diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h
index b2491c65f053..e72dd08b701b 100644
--- a/drivers/gpu/drm/gma500/gma_display.h
+++ b/drivers/gpu/drm/gma500/gma_display.h
@@ -72,8 +72,8 @@ extern int gma_crtc_cursor_set(struct drm_crtc *crtc,
72 uint32_t width, uint32_t height); 72 uint32_t width, uint32_t height);
73extern int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); 73extern int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
74extern void gma_crtc_load_lut(struct drm_crtc *crtc); 74extern void gma_crtc_load_lut(struct drm_crtc *crtc);
75extern void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 75extern int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
76 u16 *blue, u32 start, u32 size); 76 u16 *blue, u32 size);
77extern void gma_crtc_dpms(struct drm_crtc *crtc, int mode); 77extern void gma_crtc_dpms(struct drm_crtc *crtc, int mode);
78extern void gma_crtc_prepare(struct drm_crtc *crtc); 78extern void gma_crtc_prepare(struct drm_crtc *crtc);
79extern void gma_crtc_commit(struct drm_crtc *crtc); 79extern void gma_crtc_commit(struct drm_crtc *crtc);
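This tracks the core-wide change to the .gamma_set hook: it now returns an error code, always loads the full table, and the start offset is gone. The call shape from the core side is roughly (a sketch, not the exact drm core source):

	/*
	 * r_base/g_base/b_base are the three channel slices of
	 * crtc->gamma_store, each crtc->gamma_size entries long.
	 */
	ret = crtc->funcs->gamma_set(crtc, r_base, g_base, b_base,
				     crtc->gamma_size);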
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 398015be87e4..7b6c84925098 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -491,7 +491,6 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
491 struct drm_psb_private *dev_priv = dev->dev_private; 491 struct drm_psb_private *dev_priv = dev->dev_private;
492 struct gma_crtc *gma_crtc; 492 struct gma_crtc *gma_crtc;
493 int i; 493 int i;
494 uint16_t *r_base, *g_base, *b_base;
495 494
496 /* We allocate an extra array of drm_connector pointers 495
497 * for fbdev after the crtc */ 496 * for fbdev after the crtc */
@@ -519,16 +518,10 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
519 gma_crtc->pipe = pipe; 518 gma_crtc->pipe = pipe;
520 gma_crtc->plane = pipe; 519 gma_crtc->plane = pipe;
521 520
522 r_base = gma_crtc->base.gamma_store;
523 g_base = r_base + 256;
524 b_base = g_base + 256;
525 for (i = 0; i < 256; i++) { 521 for (i = 0; i < 256; i++) {
526 gma_crtc->lut_r[i] = i; 522 gma_crtc->lut_r[i] = i;
527 gma_crtc->lut_g[i] = i; 523 gma_crtc->lut_g[i] = i;
528 gma_crtc->lut_b[i] = i; 524 gma_crtc->lut_b[i] = i;
529 r_base[i] = i << 8;
530 g_base[i] = i << 8;
531 b_base[i] = i << 8;
532 525
533 gma_crtc->lut_adj[i] = 0; 526 gma_crtc->lut_adj[i] = 0;
534 } 527 }
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index fba6372d060e..ed76baad525f 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -502,13 +502,6 @@ static void ade_crtc_disable(struct drm_crtc *crtc)
502 acrtc->enable = false; 502 acrtc->enable = false;
503} 503}
504 504
505static int ade_crtc_atomic_check(struct drm_crtc *crtc,
506 struct drm_crtc_state *state)
507{
508 /* do nothing */
509 return 0;
510}
511
512static void ade_crtc_mode_set_nofb(struct drm_crtc *crtc) 505static void ade_crtc_mode_set_nofb(struct drm_crtc *crtc)
513{ 506{
514 struct ade_crtc *acrtc = to_ade_crtc(crtc); 507 struct ade_crtc *acrtc = to_ade_crtc(crtc);
@@ -537,6 +530,7 @@ static void ade_crtc_atomic_flush(struct drm_crtc *crtc,
537{ 530{
538 struct ade_crtc *acrtc = to_ade_crtc(crtc); 531 struct ade_crtc *acrtc = to_ade_crtc(crtc);
539 struct ade_hw_ctx *ctx = acrtc->ctx; 532 struct ade_hw_ctx *ctx = acrtc->ctx;
533 struct drm_pending_vblank_event *event = crtc->state->event;
540 void __iomem *base = ctx->base; 534 void __iomem *base = ctx->base;
541 535
542 /* only crtc is enabled regs take effect */ 536 /* only crtc is enabled regs take effect */
@@ -545,12 +539,22 @@ static void ade_crtc_atomic_flush(struct drm_crtc *crtc,
545 /* flush ade registers */ 539 /* flush ade registers */
546 writel(ADE_ENABLE, base + ADE_EN); 540 writel(ADE_ENABLE, base + ADE_EN);
547 } 541 }
542
543 if (event) {
544 crtc->state->event = NULL;
545
546 spin_lock_irq(&crtc->dev->event_lock);
547 if (drm_crtc_vblank_get(crtc) == 0)
548 drm_crtc_arm_vblank_event(crtc, event);
549 else
550 drm_crtc_send_vblank_event(crtc, event);
551 spin_unlock_irq(&crtc->dev->event_lock);
552 }
548} 553}
549 554
550static const struct drm_crtc_helper_funcs ade_crtc_helper_funcs = { 555static const struct drm_crtc_helper_funcs ade_crtc_helper_funcs = {
551 .enable = ade_crtc_enable, 556 .enable = ade_crtc_enable,
552 .disable = ade_crtc_disable, 557 .disable = ade_crtc_disable,
553 .atomic_check = ade_crtc_atomic_check,
554 .mode_set_nofb = ade_crtc_mode_set_nofb, 558 .mode_set_nofb = ade_crtc_mode_set_nofb,
555 .atomic_begin = ade_crtc_atomic_begin, 559 .atomic_begin = ade_crtc_atomic_begin,
556 .atomic_flush = ade_crtc_atomic_flush, 560 .atomic_flush = ade_crtc_atomic_flush,
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index 3f94785fbcca..1edd9bc80294 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -171,9 +171,8 @@ static struct drm_driver kirin_drm_driver = {
171 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | 171 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
172 DRIVER_ATOMIC | DRIVER_HAVE_IRQ, 172 DRIVER_ATOMIC | DRIVER_HAVE_IRQ,
173 .fops = &kirin_drm_fops, 173 .fops = &kirin_drm_fops,
174 .set_busid = drm_platform_set_busid,
175 174
176 .gem_free_object = drm_gem_cma_free_object, 175 .gem_free_object_unlocked = drm_gem_cma_free_object,
177 .gem_vm_ops = &drm_gem_cma_vm_ops, 176 .gem_vm_ops = &drm_gem_cma_vm_ops,
178 .dumb_create = kirin_gem_cma_dumb_create, 177 .dumb_create = kirin_gem_cma_dumb_create,
179 .dumb_map_offset = drm_gem_cma_dumb_map_offset, 178 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
@@ -221,19 +220,12 @@ static int kirin_drm_bind(struct device *dev)
221 if (ret) 220 if (ret)
222 goto err_kms_cleanup; 221 goto err_kms_cleanup;
223 222
224 /* connectors should be registered after drm device register */
225 ret = drm_connector_register_all(drm_dev);
226 if (ret)
227 goto err_drm_dev_unregister;
228
229 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", 223 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
230 driver->name, driver->major, driver->minor, driver->patchlevel, 224 driver->name, driver->major, driver->minor, driver->patchlevel,
231 driver->date, drm_dev->primary->index); 225 driver->date, drm_dev->primary->index);
232 226
233 return 0; 227 return 0;
234 228
235err_drm_dev_unregister:
236 drm_dev_unregister(drm_dev);
237err_kms_cleanup: 229err_kms_cleanup:
238 kirin_drm_kms_cleanup(drm_dev); 230 kirin_drm_kms_cleanup(drm_dev);
239err_drm_dev_unref: 231err_drm_dev_unref:
@@ -246,7 +238,6 @@ static void kirin_drm_unbind(struct device *dev)
246{ 238{
247 struct drm_device *drm_dev = dev_get_drvdata(dev); 239 struct drm_device *drm_dev = dev_get_drvdata(dev);
248 240
249 drm_connector_unregister_all(drm_dev);
250 drm_dev_unregister(drm_dev); 241 drm_dev_unregister(drm_dev);
251 kirin_drm_kms_cleanup(drm_dev); 242 kirin_drm_kms_cleanup(drm_dev);
252 drm_dev_unref(drm_dev); 243 drm_dev_unref(drm_dev);
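
The two hunks above rely on the core change that makes drm_dev_register() register all connectors itself, so the driver-side drm_connector_register_all()/drm_connector_unregister_all() calls and the extra error label become redundant. The resulting bind/unbind symmetry, sketched with allocation and error handling trimmed:

	static int example_bind(struct device *dev)
	{
		/* ... drm_dev_alloc(), kirin_drm_kms_init() ... */
		ret = drm_dev_register(drm_dev, 0); /* now registers connectors too */
		if (ret)
			goto err_kms_cleanup;
		return 0;
	}

	static void example_unbind(struct device *dev)
	{
		struct drm_device *drm_dev = dev_get_drvdata(dev);

		drm_dev_unregister(drm_dev); /* unregisters connectors as well */
		kirin_drm_kms_cleanup(drm_dev);
		drm_dev_unref(drm_dev);
	}
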
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 29a32b11953b..7769e469118f 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -57,6 +57,28 @@ config DRM_I915_USERPTR
57 57
58 If in doubt, say "Y". 58 If in doubt, say "Y".
59 59
60config DRM_I915_GVT
61 bool "Enable Intel GVT-g graphics virtualization host support"
62 depends on DRM_I915
63 default n
64 help
65 Choose this option if you want to enable Intel GVT-g graphics
66 virtualization technology host support with integrated graphics.
67 With GVT-g, it's possible to have one integrated graphics
68 device shared by multiple VMs under different hypervisors.
69
 70 Note that at least one hypervisor, such as Xen or KVM, is required
 71 for this driver to work, and it only supports newer devices from
 72 Broadwell onwards. For further information and a setup guide,
 73 visit: http://01.org/igvt-g.
74
 75 For now this is just a stub to support the i915 modifications for
 76 the GVT device model. It requires at least one MPT module for
 77 Xen/KVM and the other GVT device model components to work. Use it
 78 at your own risk.
79
80 If in doubt, say "N".
81
60menu "drm/i915 Debugging" 82menu "drm/i915 Debugging"
61depends on DRM_I915 83depends on DRM_I915
62depends on EXPERT 84depends on EXPERT
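
For C code that must compile away when such a bool option is disabled, the conventional idiom is an IS_ENABLED()-guarded stub in a header; a generic sketch of that usual kernel pattern (not necessarily how this series wires it up — here the Makefile conditionally builds intel_gvt.o instead):

	#if IS_ENABLED(CONFIG_DRM_I915_GVT)
	int intel_gvt_init(struct drm_i915_private *dev_priv);
	#else
	static inline int intel_gvt_init(struct drm_i915_private *dev_priv)
	{
		return 0;	/* GVT support compiled out: succeed as a no-op */
	}
	#endif
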
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 0b88ba0f3c1f..276abf1cac2b 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -59,6 +59,7 @@ i915-y += intel_audio.o \
59 intel_bios.o \ 59 intel_bios.o \
60 intel_color.o \ 60 intel_color.o \
61 intel_display.o \ 61 intel_display.o \
62 intel_dpio_phy.o \
62 intel_dpll_mgr.o \ 63 intel_dpll_mgr.o \
63 intel_fbc.o \ 64 intel_fbc.o \
64 intel_fifo_underrun.o \ 65 intel_fifo_underrun.o \
@@ -81,10 +82,12 @@ i915-y += dvo_ch7017.o \
81 dvo_tfp410.o \ 82 dvo_tfp410.o \
82 intel_crt.o \ 83 intel_crt.o \
83 intel_ddi.o \ 84 intel_ddi.o \
85 intel_dp_aux_backlight.o \
84 intel_dp_link_training.o \ 86 intel_dp_link_training.o \
85 intel_dp_mst.o \ 87 intel_dp_mst.o \
86 intel_dp.o \ 88 intel_dp.o \
87 intel_dsi.o \ 89 intel_dsi.o \
90 intel_dsi_dcs_backlight.o \
88 intel_dsi_panel_vbt.o \ 91 intel_dsi_panel_vbt.o \
89 intel_dsi_pll.o \ 92 intel_dsi_pll.o \
90 intel_dvo.o \ 93 intel_dvo.o \
@@ -101,6 +104,11 @@ i915-y += i915_vgpu.o
101# legacy horrors 104# legacy horrors
102i915-y += i915_dma.o 105i915-y += i915_dma.o
103 106
107ifeq ($(CONFIG_DRM_I915_GVT),y)
108i915-y += intel_gvt.o
109include $(src)/gvt/Makefile
110endif
111
104obj-$(CONFIG_DRM_I915) += i915.o 112obj-$(CONFIG_DRM_I915) += i915.o
105 113
106CFLAGS_i915_trace_points.o := -I$(src) 114CFLAGS_i915_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
new file mode 100644
index 000000000000..d0f21a6ad60d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/Makefile
@@ -0,0 +1,5 @@
1GVT_DIR := gvt
2GVT_SOURCE := gvt.o
3
4ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall
5i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
diff --git a/drivers/gpu/drm/i915/gvt/debug.h b/drivers/gpu/drm/i915/gvt/debug.h
new file mode 100644
index 000000000000..7ef412be665f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/debug.h
@@ -0,0 +1,34 @@
1/*
2 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24#ifndef __GVT_DEBUG_H__
25#define __GVT_DEBUG_H__
26
27#define gvt_dbg_core(fmt, args...) \
28 DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args)
29
30/*
 31 * Additional GVT debug macros will be introduced in the GVT device model patches.
32 */
33
34#endif
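
gvt_dbg_core() is a thin wrapper around DRM_DEBUG_DRIVER() that tags each message with a "gvt: core: " prefix, so enabling the drm driver debug category is enough to see GVT core logs. A call such as the one made from gvt.c below:

	gvt_dbg_core("init gvt device\n");
	/* shows up in dmesg roughly as: [drm:...] gvt: core: init gvt device */
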
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
new file mode 100644
index 000000000000..927f4579f5b6
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -0,0 +1,145 @@
1/*
2 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24#include <linux/types.h>
25#include <xen/xen.h>
26
27#include "i915_drv.h"
28
29struct intel_gvt_host intel_gvt_host;
30
31static const char * const supported_hypervisors[] = {
32 [INTEL_GVT_HYPERVISOR_XEN] = "XEN",
33 [INTEL_GVT_HYPERVISOR_KVM] = "KVM",
34};
35
36/**
37 * intel_gvt_init_host - Load MPT modules and detect if we're running in host
38 * @gvt: intel gvt device
39 *
 40 * This function is called at the driver loading stage. If it fails to find a
 41 * loadable MPT module, or detects that we are currently running inside a VM,
 42 * GVT-g will be disabled.
43 *
44 * Returns:
 45 * Zero on success, negative error code on failure.
46 *
47 */
48int intel_gvt_init_host(void)
49{
50 if (intel_gvt_host.initialized)
51 return 0;
52
53 /* Xen DOM U */
54 if (xen_domain() && !xen_initial_domain())
55 return -ENODEV;
56
57 /* Try to load MPT modules for hypervisors */
58 if (xen_initial_domain()) {
59 /* In Xen dom0 */
60 intel_gvt_host.mpt = try_then_request_module(
61 symbol_get(xengt_mpt), "xengt");
62 intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_XEN;
63 } else {
64 /* not in Xen. Try KVMGT */
65 intel_gvt_host.mpt = try_then_request_module(
66 symbol_get(kvmgt_mpt), "kvm");
67 intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_KVM;
68 }
69
 70 /* Failed to load an MPT module - bail out */
71 if (!intel_gvt_host.mpt)
72 return -EINVAL;
73
 74 /* Detect whether we're running in the host rather than in a VM. */
75 if (!intel_gvt_hypervisor_detect_host())
76 return -ENODEV;
77
78 gvt_dbg_core("Running with hypervisor %s in host mode\n",
79 supported_hypervisors[intel_gvt_host.hypervisor_type]);
80
81 intel_gvt_host.initialized = true;
82 return 0;
83}
84
85static void init_device_info(struct intel_gvt *gvt)
86{
87 if (IS_BROADWELL(gvt->dev_priv))
88 gvt->device_info.max_support_vgpus = 8;
89 /* This function will grow large in GVT device model patches. */
90}
91
92/**
93 * intel_gvt_clean_device - clean a GVT device
94 * @gvt: intel gvt device
95 *
96 * This function is called at the driver unloading stage, to free the
97 * resources owned by a GVT device.
98 *
99 */
100void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
101{
102 struct intel_gvt *gvt = &dev_priv->gvt;
103
104 if (WARN_ON(!gvt->initialized))
105 return;
106
107 /* Other de-initialization of GVT components will be introduced. */
108
109 gvt->initialized = false;
110}
111
112/**
113 * intel_gvt_init_device - initialize a GVT device
114 * @dev_priv: drm i915 private data
115 *
116 * This function is called at the initialization stage, to initialize
117 * necessary GVT components.
118 *
119 * Returns:
 120 * Zero on success, negative error code on failure.
121 *
122 */
123int intel_gvt_init_device(struct drm_i915_private *dev_priv)
124{
125 struct intel_gvt *gvt = &dev_priv->gvt;
126 /*
 127 * The GVT device cannot be initialized until intel_gvt_host has
 128 * been initialized first.
129 */
130 if (WARN_ON(!intel_gvt_host.initialized))
131 return -EINVAL;
132
133 if (WARN_ON(gvt->initialized))
134 return -EEXIST;
135
136 gvt_dbg_core("init gvt device\n");
137
138 init_device_info(gvt);
139 /*
 140 * Other initialization of GVT components will be introduced here.
141 */
142 gvt_dbg_core("gvt device creation is done\n");
143 gvt->initialized = true;
144 return 0;
145}
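
intel_gvt_init_host() uses the try_then_request_module()/symbol_get() pattern: symbol_get() returns the exported symbol (taking a reference on its owning module) if that module is already loaded, and try_then_request_module() retries once after asking modprobe to load the named module. A generic sketch of the idiom — the ops type, symbol, and module name here are illustrative, not part of the patch:

	#include <linux/kmod.h>
	#include <linux/module.h>

	extern const struct example_ops example_mpt; /* exported by an optional module */

	static const struct example_ops *load_backend(void)
	{
		/* retries after request_module("example_backend") on first failure */
		return try_then_request_module(symbol_get(example_mpt),
					       "example_backend");
	}
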
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
new file mode 100644
index 000000000000..fb619a6e519d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -0,0 +1,69 @@
1/*
2 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24#ifndef _GVT_H_
25#define _GVT_H_
26
27#include "debug.h"
28#include "hypercall.h"
29
30#define GVT_MAX_VGPU 8
31
32enum {
33 INTEL_GVT_HYPERVISOR_XEN = 0,
34 INTEL_GVT_HYPERVISOR_KVM,
35};
36
37struct intel_gvt_host {
38 bool initialized;
39 int hypervisor_type;
40 struct intel_gvt_mpt *mpt;
41};
42
43extern struct intel_gvt_host intel_gvt_host;
44
45/* Describe per-platform limitations. */
46struct intel_gvt_device_info {
47 u32 max_support_vgpus;
48 /* This data structure will grow bigger in GVT device model patches */
49};
50
51struct intel_vgpu {
52 struct intel_gvt *gvt;
53 int id;
54 unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
55};
56
57struct intel_gvt {
58 struct mutex lock;
59 bool initialized;
60
61 struct drm_i915_private *dev_priv;
62 struct idr vgpu_idr; /* vGPU IDR pool */
63
64 struct intel_gvt_device_info device_info;
65};
66
67#include "mpt.h"
68
69#endif
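
The vgpu_idr member follows the usual kernel IDR recipe for handing out small integer vGPU ids. A sketch of how such a pool is typically driven (the bounds and locking shown are illustrative — the real allocation code arrives in later GVT patches):

	#include <linux/idr.h>

	/* once, at device init */
	idr_init(&gvt->vgpu_idr);

	/* per vGPU: reserve an id in [1, GVT_MAX_VGPU] */
	mutex_lock(&gvt->lock);
	id = idr_alloc(&gvt->vgpu_idr, vgpu, 1, GVT_MAX_VGPU + 1, GFP_KERNEL);
	mutex_unlock(&gvt->lock);
	if (id < 0)
		return id;
	vgpu->id = id;
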
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
new file mode 100644
index 000000000000..254df8bf1f35
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -0,0 +1,38 @@
1/*
2 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24#ifndef _GVT_HYPERCALL_H_
25#define _GVT_HYPERCALL_H_
26
27/*
 28 * Hypervisor-specific collections of GVT-g MPT module functions. Currently
 29 * GVT-g supports both Xen and KVM via dedicated hypervisor-specific MPT modules.
30 */
31struct intel_gvt_mpt {
32 int (*detect_host)(void);
33};
34
35extern struct intel_gvt_mpt xengt_mpt;
36extern struct intel_gvt_mpt kvmgt_mpt;
37
38#endif /* _GVT_HYPERCALL_H_ */
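
A concrete MPT module fulfils this contract by exporting one of these ops tables under the name gvt.c looks up. A hypothetical minimal backend — the detection helper is illustrative — might look like:

	static int example_detect_host(void)
	{
		/* nonzero: running in the hypervisor host/privileged domain */
		return example_running_in_host();
	}

	struct intel_gvt_mpt example_mpt = {
		.detect_host = example_detect_host,
	};
	EXPORT_SYMBOL_GPL(example_mpt);
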
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
new file mode 100644
index 000000000000..03601e3ffa7c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -0,0 +1,49 @@
1/*
2 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24#ifndef _GVT_MPT_H_
25#define _GVT_MPT_H_
26
27/**
28 * DOC: Hypervisor Service APIs for GVT-g Core Logic
29 *
30 * This is the glue layer between specific hypervisor MPT modules and GVT-g core
31 * logic. Each kind of hypervisor MPT module provides a collection of function
 32 * callbacks and is attached to the GVT host when the driver loads.
 33 * GVT-g core logic calls these APIs to request specific services from
 34 * the hypervisor.
35 */
36
37/**
38 * intel_gvt_hypervisor_detect_host - check if GVT-g is running within
 39 * the hypervisor host/privileged domain
40 *
41 * Returns:
 42 * Nonzero when running in the host/privileged domain, zero inside a VM
43 */
44static inline int intel_gvt_hypervisor_detect_host(void)
45{
46 return intel_gvt_host.mpt->detect_host();
47}
48
49#endif /* _GVT_MPT_H_ */
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index a337f33bec5b..b0fd6a7b0603 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -215,7 +215,8 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
215 CMD( MI_RS_CONTEXT, SMI, F, 1, S ), 215 CMD( MI_RS_CONTEXT, SMI, F, 1, S ),
216 CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ), 216 CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
217 CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ), 217 CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
218 CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, R ), 218 CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W,
219 .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ),
219 CMD( MI_RS_STORE_DATA_IMM, SMI, !F, 0xFF, S ), 220 CMD( MI_RS_STORE_DATA_IMM, SMI, !F, 0xFF, S ),
220 CMD( MI_LOAD_URB_MEM, SMI, !F, 0xFF, S ), 221 CMD( MI_LOAD_URB_MEM, SMI, !F, 0xFF, S ),
221 CMD( MI_STORE_URB_MEM, SMI, !F, 0xFF, S ), 222 CMD( MI_STORE_URB_MEM, SMI, !F, 0xFF, S ),
@@ -736,7 +737,7 @@ static void fini_hash_table(struct intel_engine_cs *engine)
736 737
737/** 738/**
738 * i915_cmd_parser_init_ring() - set cmd parser related fields for a ringbuffer 739 * i915_cmd_parser_init_ring() - set cmd parser related fields for a ringbuffer
739 * @ring: the ringbuffer to initialize 740 * @engine: the engine to initialize
740 * 741 *
741 * Optionally initializes fields related to batch buffer command parsing in the 742 * Optionally initializes fields related to batch buffer command parsing in the
742 * struct intel_engine_cs based on whether the platform requires software 743 * struct intel_engine_cs based on whether the platform requires software
@@ -750,12 +751,12 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
750 int cmd_table_count; 751 int cmd_table_count;
751 int ret; 752 int ret;
752 753
753 if (!IS_GEN7(engine->dev)) 754 if (!IS_GEN7(engine->i915))
754 return 0; 755 return 0;
755 756
756 switch (engine->id) { 757 switch (engine->id) {
757 case RCS: 758 case RCS:
758 if (IS_HASWELL(engine->dev)) { 759 if (IS_HASWELL(engine->i915)) {
759 cmd_tables = hsw_render_ring_cmds; 760 cmd_tables = hsw_render_ring_cmds;
760 cmd_table_count = 761 cmd_table_count =
761 ARRAY_SIZE(hsw_render_ring_cmds); 762 ARRAY_SIZE(hsw_render_ring_cmds);
@@ -764,7 +765,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
764 cmd_table_count = ARRAY_SIZE(gen7_render_cmds); 765 cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
765 } 766 }
766 767
767 if (IS_HASWELL(engine->dev)) { 768 if (IS_HASWELL(engine->i915)) {
768 engine->reg_tables = hsw_render_reg_tables; 769 engine->reg_tables = hsw_render_reg_tables;
769 engine->reg_table_count = ARRAY_SIZE(hsw_render_reg_tables); 770 engine->reg_table_count = ARRAY_SIZE(hsw_render_reg_tables);
770 } else { 771 } else {
@@ -780,7 +781,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
780 engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask; 781 engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
781 break; 782 break;
782 case BCS: 783 case BCS:
783 if (IS_HASWELL(engine->dev)) { 784 if (IS_HASWELL(engine->i915)) {
784 cmd_tables = hsw_blt_ring_cmds; 785 cmd_tables = hsw_blt_ring_cmds;
785 cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds); 786 cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
786 } else { 787 } else {
@@ -788,7 +789,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
788 cmd_table_count = ARRAY_SIZE(gen7_blt_cmds); 789 cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
789 } 790 }
790 791
791 if (IS_HASWELL(engine->dev)) { 792 if (IS_HASWELL(engine->i915)) {
792 engine->reg_tables = hsw_blt_reg_tables; 793 engine->reg_tables = hsw_blt_reg_tables;
793 engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables); 794 engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables);
794 } else { 795 } else {
@@ -829,7 +830,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
829 830
830/** 831/**
831 * i915_cmd_parser_fini_ring() - clean up cmd parser related fields 832 * i915_cmd_parser_fini_ring() - clean up cmd parser related fields
832 * @ring: the ringbuffer to clean up 833 * @engine: the engine to clean up
833 * 834 *
834 * Releases any resources related to command parsing that may have been 835 * Releases any resources related to command parsing that may have been
835 * initialized for the specified ring. 836 * initialized for the specified ring.
@@ -1023,7 +1024,7 @@ unpin_src:
1023 1024
1024/** 1025/**
1025 * i915_needs_cmd_parser() - should a given ring use software command parsing? 1026 * i915_needs_cmd_parser() - should a given ring use software command parsing?
1026 * @ring: the ring in question 1027 * @engine: the engine in question
1027 * 1028 *
1028 * Only certain platforms require software batch buffer command parsing, and 1029 * Only certain platforms require software batch buffer command parsing, and
1029 * only when enabled via module parameter. 1030 * only when enabled via module parameter.
@@ -1035,7 +1036,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *engine)
1035 if (!engine->needs_cmd_parser) 1036 if (!engine->needs_cmd_parser)
1036 return false; 1037 return false;
1037 1038
1038 if (!USES_PPGTT(engine->dev)) 1039 if (!USES_PPGTT(engine->i915))
1039 return false; 1040 return false;
1040 1041
1041 return (i915.enable_cmd_parser == 1); 1042 return (i915.enable_cmd_parser == 1);
@@ -1098,6 +1099,11 @@ static bool check_cmd(const struct intel_engine_cs *engine,
1098 return false; 1099 return false;
1099 } 1100 }
1100 1101
1102 if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
1103 DRM_DEBUG_DRIVER("CMD: Rejected LRR to OACONTROL\n");
1104 return false;
1105 }
1106
1101 if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1)) 1107 if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
1102 *oacontrol_set = (cmd[offset + 1] != 0); 1108 *oacontrol_set = (cmd[offset + 1] != 0);
1103 } 1109 }
@@ -1113,6 +1119,12 @@ static bool check_cmd(const struct intel_engine_cs *engine,
1113 return false; 1119 return false;
1114 } 1120 }
1115 1121
1122 if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
1123 DRM_DEBUG_DRIVER("CMD: Rejected LRR to masked register 0x%08X\n",
1124 reg_addr);
1125 return false;
1126 }
1127
1116 if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) && 1128 if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) &&
1117 (offset + 2 > length || 1129 (offset + 2 > length ||
1118 (cmd[offset + 1] & reg->mask) != reg->value)) { 1130 (cmd[offset + 1] & reg->mask) != reg->value)) {
@@ -1164,7 +1176,7 @@ static bool check_cmd(const struct intel_engine_cs *engine,
1164 1176
1165/** 1177/**
1166 * i915_parse_cmds() - parse a submitted batch buffer for privilege violations 1178 * i915_parse_cmds() - parse a submitted batch buffer for privilege violations
1167 * @ring: the ring on which the batch is to execute 1179 * @engine: the engine on which the batch is to execute
1168 * @batch_obj: the batch buffer in question 1180 * @batch_obj: the batch buffer in question
1169 * @shadow_batch_obj: copy of the batch buffer in question 1181 * @shadow_batch_obj: copy of the batch buffer in question
1170 * @batch_start_offset: byte offset in the batch at which execution starts 1182 * @batch_start_offset: byte offset in the batch at which execution starts
@@ -1269,14 +1281,28 @@ int i915_parse_cmds(struct intel_engine_cs *engine,
1269 1281
1270/** 1282/**
1271 * i915_cmd_parser_get_version() - get the cmd parser version number 1283 * i915_cmd_parser_get_version() - get the cmd parser version number
1284 * @dev_priv: i915 device private
1272 * 1285 *
1273 * The cmd parser maintains a simple increasing integer version number suitable 1286 * The cmd parser maintains a simple increasing integer version number suitable
1274 * for passing to userspace clients to determine what operations are permitted. 1287 * for passing to userspace clients to determine what operations are permitted.
1275 * 1288 *
1276 * Return: the current version number of the cmd parser 1289 * Return: the current version number of the cmd parser
1277 */ 1290 */
1278int i915_cmd_parser_get_version(void) 1291int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
1279{ 1292{
1293 struct intel_engine_cs *engine;
1294 bool active = false;
1295
1296 /* If the command parser is not enabled, report 0 - unsupported */
1297 for_each_engine(engine, dev_priv) {
1298 if (i915_needs_cmd_parser(engine)) {
1299 active = true;
1300 break;
1301 }
1302 }
1303 if (!active)
1304 return 0;
1305
1280 /* 1306 /*
1281 * Command parser version history 1307 * Command parser version history
1282 * 1308 *
@@ -1288,6 +1314,7 @@ int i915_cmd_parser_get_version(void)
1288 * 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3. 1314 * 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3.
1289 * 5. GPGPU dispatch compute indirect registers. 1315 * 5. GPGPU dispatch compute indirect registers.
1290 * 6. TIMESTAMP register and Haswell CS GPR registers 1316 * 6. TIMESTAMP register and Haswell CS GPR registers
1317 * 7. Allow MI_LOAD_REGISTER_REG between whitelisted registers.
1291 */ 1318 */
1292 return 6; 1319 return 7;
1293} 1320}
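
Userspace observes the bump from 6 to 7 through I915_PARAM_CMD_PARSER_VERSION, and with the per-engine check added above a result of 0 now also means the parser is inactive. A userspace-side sketch of the query using libdrm (error handling kept minimal):

	#include <xf86drm.h>
	#include <i915_drm.h>

	static int cmd_parser_version(int fd)
	{
		struct drm_i915_getparam gp = {
			.param = I915_PARAM_CMD_PARSER_VERSION,
		};
		int value = 0;

		gp.value = &value;
		if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
			return -1;
		return value;	/* 0 == command parser not active */
	}
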
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 32690332d441..5b7526697838 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -89,17 +89,17 @@ static int i915_capabilities(struct seq_file *m, void *data)
89 return 0; 89 return 0;
90} 90}
91 91
92static const char get_active_flag(struct drm_i915_gem_object *obj) 92static char get_active_flag(struct drm_i915_gem_object *obj)
93{ 93{
94 return obj->active ? '*' : ' '; 94 return obj->active ? '*' : ' ';
95} 95}
96 96
97static const char get_pin_flag(struct drm_i915_gem_object *obj) 97static char get_pin_flag(struct drm_i915_gem_object *obj)
98{ 98{
99 return obj->pin_display ? 'p' : ' '; 99 return obj->pin_display ? 'p' : ' ';
100} 100}
101 101
102static const char get_tiling_flag(struct drm_i915_gem_object *obj) 102static char get_tiling_flag(struct drm_i915_gem_object *obj)
103{ 103{
104 switch (obj->tiling_mode) { 104 switch (obj->tiling_mode) {
105 default: 105 default:
@@ -109,12 +109,12 @@ static const char get_tiling_flag(struct drm_i915_gem_object *obj)
109 } 109 }
110} 110}
111 111
112static inline const char get_global_flag(struct drm_i915_gem_object *obj) 112static char get_global_flag(struct drm_i915_gem_object *obj)
113{ 113{
114 return i915_gem_obj_to_ggtt(obj) ? 'g' : ' '; 114 return i915_gem_obj_to_ggtt(obj) ? 'g' : ' ';
115} 115}
116 116
117static inline const char get_pin_mapped_flag(struct drm_i915_gem_object *obj) 117static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
118{ 118{
119 return obj->mapping ? 'M' : ' '; 119 return obj->mapping ? 'M' : ' ';
120} 120}
@@ -199,13 +199,6 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
199 seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits); 199 seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
200} 200}
201 201
202static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
203{
204 seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
205 seq_putc(m, ctx->remap_slice ? 'R' : 'r');
206 seq_putc(m, ' ');
207}
208
209static int i915_gem_object_list_info(struct seq_file *m, void *data) 202static int i915_gem_object_list_info(struct seq_file *m, void *data)
210{ 203{
211 struct drm_info_node *node = m->private; 204 struct drm_info_node *node = m->private;
@@ -424,6 +417,42 @@ static void print_batch_pool_stats(struct seq_file *m,
424 print_file_stats(m, "[k]batch pool", stats); 417 print_file_stats(m, "[k]batch pool", stats);
425} 418}
426 419
420static int per_file_ctx_stats(int id, void *ptr, void *data)
421{
422 struct i915_gem_context *ctx = ptr;
423 int n;
424
425 for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
426 if (ctx->engine[n].state)
427 per_file_stats(0, ctx->engine[n].state, data);
428 if (ctx->engine[n].ringbuf)
429 per_file_stats(0, ctx->engine[n].ringbuf->obj, data);
430 }
431
432 return 0;
433}
434
435static void print_context_stats(struct seq_file *m,
436 struct drm_i915_private *dev_priv)
437{
438 struct file_stats stats;
439 struct drm_file *file;
440
441 memset(&stats, 0, sizeof(stats));
442
443 mutex_lock(&dev_priv->dev->struct_mutex);
444 if (dev_priv->kernel_context)
445 per_file_ctx_stats(0, dev_priv->kernel_context, &stats);
446
447 list_for_each_entry(file, &dev_priv->dev->filelist, lhead) {
448 struct drm_i915_file_private *fpriv = file->driver_priv;
449 idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
450 }
451 mutex_unlock(&dev_priv->dev->struct_mutex);
452
453 print_file_stats(m, "[k]contexts", stats);
454}
455
427#define count_vmas(list, member) do { \ 456#define count_vmas(list, member) do { \
428 list_for_each_entry(vma, list, member) { \ 457 list_for_each_entry(vma, list, member) { \
429 size += i915_gem_obj_total_ggtt_size(vma->obj); \ 458 size += i915_gem_obj_total_ggtt_size(vma->obj); \
@@ -528,10 +557,10 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
528 557
529 seq_putc(m, '\n'); 558 seq_putc(m, '\n');
530 print_batch_pool_stats(m, dev_priv); 559 print_batch_pool_stats(m, dev_priv);
531
532 mutex_unlock(&dev->struct_mutex); 560 mutex_unlock(&dev->struct_mutex);
533 561
534 mutex_lock(&dev->filelist_mutex); 562 mutex_lock(&dev->filelist_mutex);
563 print_context_stats(m, dev_priv);
535 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 564 list_for_each_entry_reverse(file, &dev->filelist, lhead) {
536 struct file_stats stats; 565 struct file_stats stats;
537 struct task_struct *task; 566 struct task_struct *task;
@@ -607,18 +636,20 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
607 for_each_intel_crtc(dev, crtc) { 636 for_each_intel_crtc(dev, crtc) {
608 const char pipe = pipe_name(crtc->pipe); 637 const char pipe = pipe_name(crtc->pipe);
609 const char plane = plane_name(crtc->plane); 638 const char plane = plane_name(crtc->plane);
610 struct intel_unpin_work *work; 639 struct intel_flip_work *work;
611 640
612 spin_lock_irq(&dev->event_lock); 641 spin_lock_irq(&dev->event_lock);
613 work = crtc->unpin_work; 642 work = crtc->flip_work;
614 if (work == NULL) { 643 if (work == NULL) {
615 seq_printf(m, "No flip due on pipe %c (plane %c)\n", 644 seq_printf(m, "No flip due on pipe %c (plane %c)\n",
616 pipe, plane); 645 pipe, plane);
617 } else { 646 } else {
647 u32 pending;
618 u32 addr; 648 u32 addr;
619 649
620 if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) { 650 pending = atomic_read(&work->pending);
621 seq_printf(m, "Flip queued on pipe %c (plane %c)\n", 651 if (pending) {
652 seq_printf(m, "Flip ioctl preparing on pipe %c (plane %c)\n",
622 pipe, plane); 653 pipe, plane);
623 } else { 654 } else {
624 seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n", 655 seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
@@ -638,11 +669,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
638 seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n", 669 seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
639 work->flip_queued_vblank, 670 work->flip_queued_vblank,
640 work->flip_ready_vblank, 671 work->flip_ready_vblank,
641 drm_crtc_vblank_count(&crtc->base)); 672 intel_crtc_get_vblank_counter(crtc));
642 if (work->enable_stall_check)
643 seq_puts(m, "Stall check enabled, ");
644 else
645 seq_puts(m, "Stall check waiting for page flip ioctl, ");
646 seq_printf(m, "%d prepares\n", atomic_read(&work->pending)); 673 seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
647 674
648 if (INTEL_INFO(dev)->gen >= 4) 675 if (INTEL_INFO(dev)->gen >= 4)
@@ -1281,6 +1308,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
1281 } 1308 }
1282 seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n", 1309 seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
1283 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask); 1310 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
1311 seq_printf(m, "pm_intr_keep: 0x%08x\n", dev_priv->rps.pm_intr_keep);
1284 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); 1312 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
1285 seq_printf(m, "Render p-state ratio: %d\n", 1313 seq_printf(m, "Render p-state ratio: %d\n",
1286 (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8); 1314 (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8);
@@ -1383,7 +1411,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
1383 seqno[id] = engine->get_seqno(engine); 1411 seqno[id] = engine->get_seqno(engine);
1384 } 1412 }
1385 1413
1386 i915_get_extra_instdone(dev, instdone); 1414 i915_get_extra_instdone(dev_priv, instdone);
1387 1415
1388 intel_runtime_pm_put(dev_priv); 1416 intel_runtime_pm_put(dev_priv);
1389 1417
@@ -1991,8 +2019,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
1991 struct drm_device *dev = node->minor->dev; 2019 struct drm_device *dev = node->minor->dev;
1992 struct drm_i915_private *dev_priv = dev->dev_private; 2020 struct drm_i915_private *dev_priv = dev->dev_private;
1993 struct intel_engine_cs *engine; 2021 struct intel_engine_cs *engine;
1994 struct intel_context *ctx; 2022 struct i915_gem_context *ctx;
1995 enum intel_engine_id id;
1996 int ret; 2023 int ret;
1997 2024
1998 ret = mutex_lock_interruptible(&dev->struct_mutex); 2025 ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -2000,32 +2027,36 @@ static int i915_context_status(struct seq_file *m, void *unused)
2000 return ret; 2027 return ret;
2001 2028
2002 list_for_each_entry(ctx, &dev_priv->context_list, link) { 2029 list_for_each_entry(ctx, &dev_priv->context_list, link) {
2003 if (!i915.enable_execlists && 2030 seq_printf(m, "HW context %u ", ctx->hw_id);
2004 ctx->legacy_hw_ctx.rcs_state == NULL) 2031 if (IS_ERR(ctx->file_priv)) {
2005 continue; 2032 seq_puts(m, "(deleted) ");
2006 2033 } else if (ctx->file_priv) {
2007 seq_puts(m, "HW context "); 2034 struct pid *pid = ctx->file_priv->file->pid;
2008 describe_ctx(m, ctx); 2035 struct task_struct *task;
2009 if (ctx == dev_priv->kernel_context)
2010 seq_printf(m, "(kernel context) ");
2011 2036
2012 if (i915.enable_execlists) { 2037 task = get_pid_task(pid, PIDTYPE_PID);
2013 seq_putc(m, '\n'); 2038 if (task) {
2014 for_each_engine_id(engine, dev_priv, id) { 2039 seq_printf(m, "(%s [%d]) ",
2015 struct drm_i915_gem_object *ctx_obj = 2040 task->comm, task->pid);
2016 ctx->engine[id].state; 2041 put_task_struct(task);
2017 struct intel_ringbuffer *ringbuf =
2018 ctx->engine[id].ringbuf;
2019
2020 seq_printf(m, "%s: ", engine->name);
2021 if (ctx_obj)
2022 describe_obj(m, ctx_obj);
2023 if (ringbuf)
2024 describe_ctx_ringbuf(m, ringbuf);
2025 seq_putc(m, '\n');
2026 } 2042 }
2027 } else { 2043 } else {
2028 describe_obj(m, ctx->legacy_hw_ctx.rcs_state); 2044 seq_puts(m, "(kernel) ");
2045 }
2046
2047 seq_putc(m, ctx->remap_slice ? 'R' : 'r');
2048 seq_putc(m, '\n');
2049
2050 for_each_engine(engine, dev_priv) {
2051 struct intel_context *ce = &ctx->engine[engine->id];
2052
2053 seq_printf(m, "%s: ", engine->name);
2054 seq_putc(m, ce->initialised ? 'I' : 'i');
2055 if (ce->state)
2056 describe_obj(m, ce->state);
2057 if (ce->ringbuf)
2058 describe_ctx_ringbuf(m, ce->ringbuf);
2059 seq_putc(m, '\n');
2029 } 2060 }
2030 2061
2031 seq_putc(m, '\n'); 2062 seq_putc(m, '\n');
@@ -2037,24 +2068,22 @@ static int i915_context_status(struct seq_file *m, void *unused)
2037} 2068}
2038 2069
2039static void i915_dump_lrc_obj(struct seq_file *m, 2070static void i915_dump_lrc_obj(struct seq_file *m,
2040 struct intel_context *ctx, 2071 struct i915_gem_context *ctx,
2041 struct intel_engine_cs *engine) 2072 struct intel_engine_cs *engine)
2042{ 2073{
2074 struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
2043 struct page *page; 2075 struct page *page;
2044 uint32_t *reg_state; 2076 uint32_t *reg_state;
2045 int j; 2077 int j;
2046 struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
2047 unsigned long ggtt_offset = 0; 2078 unsigned long ggtt_offset = 0;
2048 2079
2080 seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id);
2081
2049 if (ctx_obj == NULL) { 2082 if (ctx_obj == NULL) {
2050 seq_printf(m, "Context on %s with no gem object\n", 2083 seq_puts(m, "\tNot allocated\n");
2051 engine->name);
2052 return; 2084 return;
2053 } 2085 }
2054 2086
2055 seq_printf(m, "CONTEXT: %s %u\n", engine->name,
2056 intel_execlists_ctx_id(ctx, engine));
2057
2058 if (!i915_gem_obj_ggtt_bound(ctx_obj)) 2087 if (!i915_gem_obj_ggtt_bound(ctx_obj))
2059 seq_puts(m, "\tNot bound in GGTT\n"); 2088 seq_puts(m, "\tNot bound in GGTT\n");
2060 else 2089 else
@@ -2087,7 +2116,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
2087 struct drm_device *dev = node->minor->dev; 2116 struct drm_device *dev = node->minor->dev;
2088 struct drm_i915_private *dev_priv = dev->dev_private; 2117 struct drm_i915_private *dev_priv = dev->dev_private;
2089 struct intel_engine_cs *engine; 2118 struct intel_engine_cs *engine;
2090 struct intel_context *ctx; 2119 struct i915_gem_context *ctx;
2091 int ret; 2120 int ret;
2092 2121
2093 if (!i915.enable_execlists) { 2122 if (!i915.enable_execlists) {
@@ -2100,9 +2129,8 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
2100 return ret; 2129 return ret;
2101 2130
2102 list_for_each_entry(ctx, &dev_priv->context_list, link) 2131 list_for_each_entry(ctx, &dev_priv->context_list, link)
2103 if (ctx != dev_priv->kernel_context) 2132 for_each_engine(engine, dev_priv)
2104 for_each_engine(engine, dev_priv) 2133 i915_dump_lrc_obj(m, ctx, engine);
2105 i915_dump_lrc_obj(m, ctx, engine);
2106 2134
2107 mutex_unlock(&dev->struct_mutex); 2135 mutex_unlock(&dev->struct_mutex);
2108 2136
@@ -2173,8 +2201,8 @@ static int i915_execlists(struct seq_file *m, void *data)
2173 2201
2174 seq_printf(m, "\t%d requests in queue\n", count); 2202 seq_printf(m, "\t%d requests in queue\n", count);
2175 if (head_req) { 2203 if (head_req) {
2176 seq_printf(m, "\tHead request id: %u\n", 2204 seq_printf(m, "\tHead request context: %u\n",
2177 intel_execlists_ctx_id(head_req->ctx, engine)); 2205 head_req->ctx->hw_id);
2178 seq_printf(m, "\tHead request tail: %u\n", 2206 seq_printf(m, "\tHead request tail: %u\n",
2179 head_req->tail); 2207 head_req->tail);
2180 } 2208 }
@@ -2268,7 +2296,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
2268 2296
2269static int per_file_ctx(int id, void *ptr, void *data) 2297static int per_file_ctx(int id, void *ptr, void *data)
2270{ 2298{
2271 struct intel_context *ctx = ptr; 2299 struct i915_gem_context *ctx = ptr;
2272 struct seq_file *m = data; 2300 struct seq_file *m = data;
2273 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; 2301 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2274 2302
@@ -2313,12 +2341,12 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
2313 struct drm_i915_private *dev_priv = dev->dev_private; 2341 struct drm_i915_private *dev_priv = dev->dev_private;
2314 struct intel_engine_cs *engine; 2342 struct intel_engine_cs *engine;
2315 2343
2316 if (INTEL_INFO(dev)->gen == 6) 2344 if (IS_GEN6(dev_priv))
2317 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); 2345 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
2318 2346
2319 for_each_engine(engine, dev_priv) { 2347 for_each_engine(engine, dev_priv) {
2320 seq_printf(m, "%s\n", engine->name); 2348 seq_printf(m, "%s\n", engine->name);
2321 if (INTEL_INFO(dev)->gen == 7) 2349 if (IS_GEN7(dev_priv))
2322 seq_printf(m, "GFX_MODE: 0x%08x\n", 2350 seq_printf(m, "GFX_MODE: 0x%08x\n",
2323 I915_READ(RING_MODE_GEN7(engine))); 2351 I915_READ(RING_MODE_GEN7(engine)));
2324 seq_printf(m, "PP_DIR_BASE: 0x%08x\n", 2352 seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
@@ -2365,16 +2393,16 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
2365 task = get_pid_task(file->pid, PIDTYPE_PID); 2393 task = get_pid_task(file->pid, PIDTYPE_PID);
2366 if (!task) { 2394 if (!task) {
2367 ret = -ESRCH; 2395 ret = -ESRCH;
2368 goto out_put; 2396 goto out_unlock;
2369 } 2397 }
2370 seq_printf(m, "\nproc: %s\n", task->comm); 2398 seq_printf(m, "\nproc: %s\n", task->comm);
2371 put_task_struct(task); 2399 put_task_struct(task);
2372 idr_for_each(&file_priv->context_idr, per_file_ctx, 2400 idr_for_each(&file_priv->context_idr, per_file_ctx,
2373 (void *)(unsigned long)m); 2401 (void *)(unsigned long)m);
2374 } 2402 }
2403out_unlock:
2375 mutex_unlock(&dev->filelist_mutex); 2404 mutex_unlock(&dev->filelist_mutex);
2376 2405
2377out_put:
2378 intel_runtime_pm_put(dev_priv); 2406 intel_runtime_pm_put(dev_priv);
2379 mutex_unlock(&dev->struct_mutex); 2407 mutex_unlock(&dev->struct_mutex);
2380 2408
@@ -2509,6 +2537,7 @@ static void i915_guc_client_info(struct seq_file *m,
2509 seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n", 2537 seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
2510 client->wq_size, client->wq_offset, client->wq_tail); 2538 client->wq_size, client->wq_offset, client->wq_tail);
2511 2539
2540 seq_printf(m, "\tWork queue full: %u\n", client->no_wq_space);
2512 seq_printf(m, "\tFailed to queue: %u\n", client->q_fail); 2541 seq_printf(m, "\tFailed to queue: %u\n", client->q_fail);
2513 seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail); 2542 seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
2514 seq_printf(m, "\tLast submission result: %d\n", client->retcode); 2543 seq_printf(m, "\tLast submission result: %d\n", client->retcode);
@@ -2545,6 +2574,10 @@ static int i915_guc_info(struct seq_file *m, void *data)
2545 2574
2546 mutex_unlock(&dev->struct_mutex); 2575 mutex_unlock(&dev->struct_mutex);
2547 2576
2577 seq_printf(m, "Doorbell map:\n");
2578 seq_printf(m, "\t%*pb\n", GUC_MAX_DOORBELLS, guc.doorbell_bitmap);
2579 seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc.db_cacheline);
2580
2548 seq_printf(m, "GuC total action count: %llu\n", guc.action_count); 2581 seq_printf(m, "GuC total action count: %llu\n", guc.action_count);
2549 seq_printf(m, "GuC action failure count: %u\n", guc.action_fail); 2582 seq_printf(m, "GuC action failure count: %u\n", guc.action_fail);
2550 seq_printf(m, "GuC last action command: 0x%x\n", guc.action_cmd); 2583 seq_printf(m, "GuC last action command: 0x%x\n", guc.action_cmd);
@@ -3168,7 +3201,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
3168 enum intel_engine_id id; 3201 enum intel_engine_id id;
3169 int j, ret; 3202 int j, ret;
3170 3203
3171 if (!i915_semaphore_is_enabled(dev)) { 3204 if (!i915_semaphore_is_enabled(dev_priv)) {
3172 seq_puts(m, "Semaphores are disabled\n"); 3205 seq_puts(m, "Semaphores are disabled\n");
3173 return 0; 3206 return 0;
3174 } 3207 }
@@ -4769,7 +4802,7 @@ i915_wedged_set(void *data, u64 val)
4769 4802
4770 intel_runtime_pm_get(dev_priv); 4803 intel_runtime_pm_get(dev_priv);
4771 4804
4772 i915_handle_error(dev, val, 4805 i915_handle_error(dev_priv, val,
4773 "Manually setting wedged to %llu", val); 4806 "Manually setting wedged to %llu", val);
4774 4807
4775 intel_runtime_pm_put(dev_priv); 4808 intel_runtime_pm_put(dev_priv);
@@ -4919,7 +4952,7 @@ i915_drop_caches_set(void *data, u64 val)
4919 } 4952 }
4920 4953
4921 if (val & (DROP_RETIRE | DROP_ACTIVE)) 4954 if (val & (DROP_RETIRE | DROP_ACTIVE))
4922 i915_gem_retire_requests(dev); 4955 i915_gem_retire_requests(dev_priv);
4923 4956
4924 if (val & DROP_BOUND) 4957 if (val & DROP_BOUND)
4925 i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND); 4958 i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
@@ -4993,7 +5026,7 @@ i915_max_freq_set(void *data, u64 val)
4993 5026
4994 dev_priv->rps.max_freq_softlimit = val; 5027 dev_priv->rps.max_freq_softlimit = val;
4995 5028
4996 intel_set_rps(dev, val); 5029 intel_set_rps(dev_priv, val);
4997 5030
4998 mutex_unlock(&dev_priv->rps.hw_lock); 5031 mutex_unlock(&dev_priv->rps.hw_lock);
4999 5032
@@ -5060,7 +5093,7 @@ i915_min_freq_set(void *data, u64 val)
5060 5093
5061 dev_priv->rps.min_freq_softlimit = val; 5094 dev_priv->rps.min_freq_softlimit = val;
5062 5095
5063 intel_set_rps(dev, val); 5096 intel_set_rps(dev_priv, val);
5064 5097
5065 mutex_unlock(&dev_priv->rps.hw_lock); 5098 mutex_unlock(&dev_priv->rps.hw_lock);
5066 5099
@@ -5277,6 +5310,10 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
5277 INTEL_INFO(dev)->eu_total); 5310 INTEL_INFO(dev)->eu_total);
5278 seq_printf(m, " Available EU Per Subslice: %u\n", 5311 seq_printf(m, " Available EU Per Subslice: %u\n",
5279 INTEL_INFO(dev)->eu_per_subslice); 5312 INTEL_INFO(dev)->eu_per_subslice);
5313 seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev)));
5314 if (HAS_POOLED_EU(dev))
5315 seq_printf(m, " Min EU in pool: %u\n",
5316 INTEL_INFO(dev)->min_eu_in_pool);
5280 seq_printf(m, " Has Slice Power Gating: %s\n", 5317 seq_printf(m, " Has Slice Power Gating: %s\n",
5281 yesno(INTEL_INFO(dev)->has_slice_pg)); 5318 yesno(INTEL_INFO(dev)->has_slice_pg));
5282 seq_printf(m, " Has Subslice Power Gating: %s\n", 5319 seq_printf(m, " Has Subslice Power Gating: %s\n",
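
print_context_stats() above leans on the generic idr_for_each() walk: the callback receives (id, pointer, cookie) for every populated slot and folds its result into the caller-supplied accumulator. The shape of that pattern, reduced to a sketch with illustrative names:

	struct example_stats {
		unsigned long count;
	};

	static int example_count_one(int id, void *ptr, void *data)
	{
		struct example_stats *stats = data;

		stats->count++;	/* inspect ptr (the stored context) here */
		return 0;	/* a nonzero return would stop the walk */
	}

	/* caller, holding whatever lock guards the idr */
	struct example_stats stats = { 0 };
	idr_for_each(&file_priv->context_idr, example_count_one, &stats);
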
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index b3198fcd0536..d15a461fa84a 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -186,7 +186,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
186 value = 1; 186 value = 1;
187 break; 187 break;
188 case I915_PARAM_HAS_SEMAPHORES: 188 case I915_PARAM_HAS_SEMAPHORES:
189 value = i915_semaphore_is_enabled(dev); 189 value = i915_semaphore_is_enabled(dev_priv);
190 break; 190 break;
191 case I915_PARAM_HAS_PRIME_VMAP_FLUSH: 191 case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
192 value = 1; 192 value = 1;
@@ -204,7 +204,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
204 value = 1; 204 value = 1;
205 break; 205 break;
206 case I915_PARAM_CMD_PARSER_VERSION: 206 case I915_PARAM_CMD_PARSER_VERSION:
207 value = i915_cmd_parser_get_version(); 207 value = i915_cmd_parser_get_version(dev_priv);
208 break; 208 break;
209 case I915_PARAM_HAS_COHERENT_PHYS_GTT: 209 case I915_PARAM_HAS_COHERENT_PHYS_GTT:
210 value = 1; 210 value = 1;
@@ -223,8 +223,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
223 return -ENODEV; 223 return -ENODEV;
224 break; 224 break;
225 case I915_PARAM_HAS_GPU_RESET: 225 case I915_PARAM_HAS_GPU_RESET:
226 value = i915.enable_hangcheck && 226 value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
227 intel_has_gpu_reset(dev);
228 break; 227 break;
229 case I915_PARAM_HAS_RESOURCE_STREAMER: 228 case I915_PARAM_HAS_RESOURCE_STREAMER:
230 value = HAS_RESOURCE_STREAMER(dev); 229 value = HAS_RESOURCE_STREAMER(dev);
@@ -425,6 +424,43 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
425 .can_switch = i915_switcheroo_can_switch, 424 .can_switch = i915_switcheroo_can_switch,
426}; 425};
427 426
427static void i915_gem_fini(struct drm_device *dev)
428{
429 struct drm_i915_private *dev_priv = to_i915(dev);
430
431 /*
 432 * Neither the BIOS, ourselves nor any other kernel
433 * expects the system to be in execlists mode on startup,
434 * so we need to reset the GPU back to legacy mode. And the only
435 * known way to disable logical contexts is through a GPU reset.
436 *
437 * So in order to leave the system in a known default configuration,
438 * always reset the GPU upon unload. Afterwards we then clean up the
439 * GEM state tracking, flushing off the requests and leaving the
440 * system in a known idle state.
441 *
 442 * Note that it is of the utmost importance that the GPU is idle and
443 * all stray writes are flushed *before* we dismantle the backing
444 * storage for the pinned objects.
445 *
 446 * However, since we are uncertain that resetting the GPU on older
447 * machines is a good idea, we don't - just in case it leaves the
448 * machine in an unusable condition.
449 */
450 if (HAS_HW_CONTEXTS(dev)) {
451 int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
452 WARN_ON(reset && reset != -ENODEV);
453 }
454
455 mutex_lock(&dev->struct_mutex);
456 i915_gem_reset(dev);
457 i915_gem_cleanup_engines(dev);
458 i915_gem_context_fini(dev);
459 mutex_unlock(&dev->struct_mutex);
460
461 WARN_ON(!list_empty(&to_i915(dev)->context_list));
462}
463
428static int i915_load_modeset_init(struct drm_device *dev) 464static int i915_load_modeset_init(struct drm_device *dev)
429{ 465{
430 struct drm_i915_private *dev_priv = dev->dev_private; 466 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -454,6 +490,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
454 if (ret) 490 if (ret)
455 goto cleanup_vga_client; 491 goto cleanup_vga_client;
456 492
493 /* must happen before intel_power_domains_init_hw() on VLV/CHV */
494 intel_update_rawclk(dev_priv);
495
457 intel_power_domains_init_hw(dev_priv, false); 496 intel_power_domains_init_hw(dev_priv, false);
458 497
459 intel_csr_ucode_init(dev_priv); 498 intel_csr_ucode_init(dev_priv);
@@ -468,7 +507,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
468 * working irqs for e.g. gmbus and dp aux transfers. */ 507 * working irqs for e.g. gmbus and dp aux transfers. */
469 intel_modeset_init(dev); 508 intel_modeset_init(dev);
470 509
471 intel_guc_ucode_init(dev); 510 intel_guc_init(dev);
472 511
473 ret = i915_gem_init(dev); 512 ret = i915_gem_init(dev);
474 if (ret) 513 if (ret)
@@ -503,12 +542,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
503 return 0; 542 return 0;
504 543
505cleanup_gem: 544cleanup_gem:
506 mutex_lock(&dev->struct_mutex); 545 i915_gem_fini(dev);
507 i915_gem_cleanup_engines(dev);
508 i915_gem_context_fini(dev);
509 mutex_unlock(&dev->struct_mutex);
510cleanup_irq: 546cleanup_irq:
511 intel_guc_ucode_fini(dev); 547 intel_guc_fini(dev);
512 drm_irq_uninstall(dev); 548 drm_irq_uninstall(dev);
513 intel_teardown_gmbus(dev); 549 intel_teardown_gmbus(dev);
514cleanup_csr: 550cleanup_csr:
@@ -728,6 +764,32 @@ static void gen9_sseu_info_init(struct drm_device *dev)
728 (info->slice_total > 1)); 764 (info->slice_total > 1));
729 info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1)); 765 info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1));
730 info->has_eu_pg = (info->eu_per_subslice > 2); 766 info->has_eu_pg = (info->eu_per_subslice > 2);
767
768 if (IS_BROXTON(dev)) {
769#define IS_SS_DISABLED(_ss_disable, ss) (_ss_disable & (0x1 << ss))
770 /*
771 * There is a HW issue in 2x6 fused down parts that requires
772 * Pooled EU to be enabled as a WA. The pool configuration
773 * changes depending upon which subslice is fused down. This
774 * doesn't affect if the device has all 3 subslices enabled.
775 */
776 /* WaEnablePooledEuFor2x6:bxt */
777 info->has_pooled_eu = ((info->subslice_per_slice == 3) ||
778 (info->subslice_per_slice == 2 &&
779 INTEL_REVID(dev) < BXT_REVID_C0));
780
781 info->min_eu_in_pool = 0;
782 if (info->has_pooled_eu) {
783 if (IS_SS_DISABLED(ss_disable, 0) ||
784 IS_SS_DISABLED(ss_disable, 2))
785 info->min_eu_in_pool = 3;
786 else if (IS_SS_DISABLED(ss_disable, 1))
787 info->min_eu_in_pool = 6;
788 else
789 info->min_eu_in_pool = 9;
790 }
791#undef IS_SS_DISABLED
792 }
731} 793}
732 794
733static void broadwell_sseu_info_init(struct drm_device *dev) 795static void broadwell_sseu_info_init(struct drm_device *dev)
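
The WaEnablePooledEuFor2x6 block above maps which subslice is fused off to a minimum pool size; isolated as a helper, the table reads as follows (a restatement of the logic just added, not new behaviour):

	static u8 example_min_eu_in_pool(u32 ss_disable)
	{
		if (ss_disable & (BIT(0) | BIT(2)))
			return 3;	/* subslice 0 or 2 fused off */
		if (ss_disable & BIT(1))
			return 6;	/* subslice 1 fused off */
		return 9;		/* all three subslices present */
	}
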
@@ -850,7 +912,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
850 DRM_INFO("Display disabled (module parameter)\n"); 912 DRM_INFO("Display disabled (module parameter)\n");
851 info->num_pipes = 0; 913 info->num_pipes = 0;
852 } else if (info->num_pipes > 0 && 914 } else if (info->num_pipes > 0 &&
853 (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) && 915 (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) &&
854 HAS_PCH_SPLIT(dev)) { 916 HAS_PCH_SPLIT(dev)) {
855 u32 fuse_strap = I915_READ(FUSE_STRAP); 917 u32 fuse_strap = I915_READ(FUSE_STRAP);
856 u32 sfuse_strap = I915_READ(SFUSE_STRAP); 918 u32 sfuse_strap = I915_READ(SFUSE_STRAP);
@@ -874,7 +936,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
874 DRM_INFO("PipeC fused off\n"); 936 DRM_INFO("PipeC fused off\n");
875 info->num_pipes -= 1; 937 info->num_pipes -= 1;
876 } 938 }
877 } else if (info->num_pipes > 0 && INTEL_INFO(dev)->gen == 9) { 939 } else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) {
878 u32 dfsm = I915_READ(SKL_DFSM); 940 u32 dfsm = I915_READ(SKL_DFSM);
879 u8 disabled_mask = 0; 941 u8 disabled_mask = 0;
880 bool invalid; 942 bool invalid;
@@ -915,21 +977,40 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
915 else if (INTEL_INFO(dev)->gen >= 9) 977 else if (INTEL_INFO(dev)->gen >= 9)
916 gen9_sseu_info_init(dev); 978 gen9_sseu_info_init(dev);
917 979
918 /* Snooping is broken on BXT A stepping. */
919 info->has_snoop = !info->has_llc; 980 info->has_snoop = !info->has_llc;
920 info->has_snoop &= !IS_BXT_REVID(dev, 0, BXT_REVID_A1); 981
982 /* Snooping is broken on BXT A stepping. */
983 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
984 info->has_snoop = false;
921 985
922 DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total); 986 DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
923 DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total); 987 DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
924 DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice); 988 DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
925 DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total); 989 DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
926 DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice); 990 DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
991 DRM_DEBUG_DRIVER("Has Pooled EU: %s\n", HAS_POOLED_EU(dev) ? "y" : "n");
992 if (HAS_POOLED_EU(dev))
993 DRM_DEBUG_DRIVER("Min EU in pool: %u\n", info->min_eu_in_pool);
927 DRM_DEBUG_DRIVER("has slice power gating: %s\n", 994 DRM_DEBUG_DRIVER("has slice power gating: %s\n",
928 info->has_slice_pg ? "y" : "n"); 995 info->has_slice_pg ? "y" : "n");
929 DRM_DEBUG_DRIVER("has subslice power gating: %s\n", 996 DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
930 info->has_subslice_pg ? "y" : "n"); 997 info->has_subslice_pg ? "y" : "n");
931 DRM_DEBUG_DRIVER("has EU power gating: %s\n", 998 DRM_DEBUG_DRIVER("has EU power gating: %s\n",
932 info->has_eu_pg ? "y" : "n"); 999 info->has_eu_pg ? "y" : "n");
1000
1001 i915.enable_execlists =
1002 intel_sanitize_enable_execlists(dev_priv,
1003 i915.enable_execlists);
1004
1005 /*
1006 * i915.enable_ppgtt is read-only, so do an early pass to validate the
1007 * user's requested state against the hardware/driver capabilities. We
1008 * do this now so that we can print out any log messages once rather
1009 * than every time we check intel_enable_ppgtt().
1010 */
1011 i915.enable_ppgtt =
1012 intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
1013 DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
933} 1014}
934 1015
935static void intel_init_dpio(struct drm_i915_private *dev_priv) 1016static void intel_init_dpio(struct drm_i915_private *dev_priv)
@@ -1020,6 +1101,9 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
1020 memcpy(device_info, info, sizeof(dev_priv->info)); 1101 memcpy(device_info, info, sizeof(dev_priv->info));
1021 device_info->device_id = dev->pdev->device; 1102 device_info->device_id = dev->pdev->device;
1022 1103
1104 BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
1105 device_info->gen_mask = BIT(device_info->gen - 1);
1106
1023 spin_lock_init(&dev_priv->irq_lock); 1107 spin_lock_init(&dev_priv->irq_lock);
1024 spin_lock_init(&dev_priv->gpu_error.lock); 1108 spin_lock_init(&dev_priv->gpu_error.lock);
1025 mutex_init(&dev_priv->backlight_lock); 1109 mutex_init(&dev_priv->backlight_lock);
@@ -1036,6 +1120,10 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
1036 if (ret < 0) 1120 if (ret < 0)
1037 return ret; 1121 return ret;
1038 1122
1123 ret = intel_gvt_init(dev_priv);
1124 if (ret < 0)
1125 goto err_workqueues;
1126
1039 /* This must be called before any calls to HAS_PCH_* */ 1127 /* This must be called before any calls to HAS_PCH_* */
1040 intel_detect_pch(dev); 1128 intel_detect_pch(dev);
1041 1129
@@ -1061,6 +1149,10 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
1061 "It may not be fully functional.\n"); 1149 "It may not be fully functional.\n");
1062 1150
1063 return 0; 1151 return 0;
1152
1153err_workqueues:
1154 i915_workqueues_cleanup(dev_priv);
1155 return ret;
1064} 1156}
1065 1157
1066/** 1158/**
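The new err_workqueues label follows the kernel's usual goto-ladder unwinding: each init step that can fail jumps to a label that tears down only the steps that already succeeded. A condensed sketch, using the function names visible in this hunk:

	static int init_early_sketch(struct drm_i915_private *dev_priv)
	{
		int ret;

		ret = i915_workqueues_init(dev_priv);	/* step 1 */
		if (ret < 0)
			return ret;

		ret = intel_gvt_init(dev_priv);		/* step 2 */
		if (ret < 0)
			goto err_workqueues;		/* unwind step 1 only */

		return 0;

	err_workqueues:
		i915_workqueues_cleanup(dev_priv);
		return ret;
	}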
@@ -1137,7 +1229,7 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
1137 if (ret < 0) 1229 if (ret < 0)
1138 goto put_bridge; 1230 goto put_bridge;
1139 1231
-1140 intel_uncore_init(dev);
+1232 intel_uncore_init(dev_priv);
1141 1233
1142 return 0; 1234 return 0;
1143 1235
@@ -1155,7 +1247,7 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
1155{ 1247{
1156 struct drm_device *dev = dev_priv->dev; 1248 struct drm_device *dev = dev_priv->dev;
1157 1249
-1158 intel_uncore_fini(dev);
+1250 intel_uncore_fini(dev_priv);
1159 i915_mmio_cleanup(dev); 1251 i915_mmio_cleanup(dev);
1160 pci_dev_put(dev_priv->bridge_dev); 1252 pci_dev_put(dev_priv->bridge_dev);
1161} 1253}
@@ -1206,8 +1298,15 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
1206 pci_set_master(dev->pdev); 1298 pci_set_master(dev->pdev);
1207 1299
1208 /* overlay on gen2 is broken and can't address above 1G */ 1300 /* overlay on gen2 is broken and can't address above 1G */
-1209 if (IS_GEN2(dev))
-1210 	dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
+1301 if (IS_GEN2(dev)) {
+1302 	ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
1303 if (ret) {
1304 DRM_ERROR("failed to set DMA mask\n");
1305
1306 goto out_ggtt;
1307 }
1308 }
1309
1211 1310
1212 /* 965GM sometimes incorrectly writes to hardware status page (HWS) 1311 /* 965GM sometimes incorrectly writes to hardware status page (HWS)
1213 * using 32bit addressing, overwriting memory if HWS is located 1312 * using 32bit addressing, overwriting memory if HWS is located
@@ -1217,8 +1316,15 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
1217 * behaviour if any general state is accessed within a page above 4GB, 1316 * behaviour if any general state is accessed within a page above 4GB,
1218 * which also needs to be handled carefully. 1317 * which also needs to be handled carefully.
1219 */ 1318 */
-1220 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
-1221 	dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
+1319 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) {
+1320 	ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
1321
1322 if (ret) {
1323 DRM_ERROR("failed to set DMA mask\n");
1324
1325 goto out_ggtt;
1326 }
1327 }
1222 1328
1223 aperture_size = ggtt->mappable_end; 1329 aperture_size = ggtt->mappable_end;
1224 1330
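For reference, DMA_BIT_MASK(n) from <linux/dma-mapping.h> expands to the low n address bits, so the two calls in the hunks above limit coherent allocations to 1 GiB (gen2 overlay) and 4 GiB (Broadwater/Crestline HWS) respectively:

	#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

	/* DMA_BIT_MASK(30) == 0x3fffffff -> addresses below 1 GiB */
	/* DMA_BIT_MASK(32) == 0xffffffff -> addresses below 4 GiB */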
@@ -1236,9 +1342,9 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
1236 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, 1342 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
1237 PM_QOS_DEFAULT_VALUE); 1343 PM_QOS_DEFAULT_VALUE);
1238 1344
-1239 intel_uncore_sanitize(dev);
+1345 intel_uncore_sanitize(dev_priv);
1240 1346
-1241 intel_opregion_setup(dev);
+1347 intel_opregion_setup(dev_priv);
1242 1348
1243 i915_gem_load_init_fences(dev_priv); 1349 i915_gem_load_init_fences(dev_priv);
1244 1350
@@ -1300,14 +1406,14 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
1300 * Notify a valid surface after modesetting, 1406 * Notify a valid surface after modesetting,
1301 * when running inside a VM. 1407 * when running inside a VM.
1302 */ 1408 */
-1303 if (intel_vgpu_active(dev))
+1409 if (intel_vgpu_active(dev_priv))
1304 I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY); 1410 I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
1305 1411
1306 i915_setup_sysfs(dev); 1412 i915_setup_sysfs(dev);
1307 1413
1308 if (INTEL_INFO(dev_priv)->num_pipes) { 1414 if (INTEL_INFO(dev_priv)->num_pipes) {
1309 /* Must be done after probing outputs */ 1415 /* Must be done after probing outputs */
-1310 intel_opregion_init(dev);
+1416 intel_opregion_register(dev_priv);
1311 acpi_video_register(); 1417 acpi_video_register();
1312 } 1418 }
1313 1419
@@ -1326,7 +1432,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
1326 i915_audio_component_cleanup(dev_priv); 1432 i915_audio_component_cleanup(dev_priv);
1327 intel_gpu_ips_teardown(); 1433 intel_gpu_ips_teardown();
1328 acpi_video_unregister(); 1434 acpi_video_unregister();
-1329 intel_opregion_fini(dev_priv->dev);
+1435 intel_opregion_unregister(dev_priv);
1330 i915_teardown_sysfs(dev_priv->dev); 1436 i915_teardown_sysfs(dev_priv->dev);
1331 i915_gem_shrinker_cleanup(dev_priv); 1437 i915_gem_shrinker_cleanup(dev_priv);
1332} 1438}
@@ -1418,6 +1524,8 @@ int i915_driver_unload(struct drm_device *dev)
1418 1524
1419 intel_fbdev_fini(dev); 1525 intel_fbdev_fini(dev);
1420 1526
1527 intel_gvt_cleanup(dev_priv);
1528
1421 ret = i915_gem_suspend(dev); 1529 ret = i915_gem_suspend(dev);
1422 if (ret) { 1530 if (ret) {
1423 DRM_ERROR("failed to idle hardware: %d\n", ret); 1531 DRM_ERROR("failed to idle hardware: %d\n", ret);
@@ -1458,11 +1566,8 @@ int i915_driver_unload(struct drm_device *dev)
1458 /* Flush any outstanding unpin_work. */ 1566 /* Flush any outstanding unpin_work. */
1459 flush_workqueue(dev_priv->wq); 1567 flush_workqueue(dev_priv->wq);
1460 1568
-1461 intel_guc_ucode_fini(dev);
-1462 mutex_lock(&dev->struct_mutex);
-1463 i915_gem_cleanup_engines(dev);
-1464 i915_gem_context_fini(dev);
-1465 mutex_unlock(&dev->struct_mutex);
+1569 intel_guc_fini(dev);
+1570 i915_gem_fini(dev);
1466 intel_fbc_cleanup_cfb(dev_priv); 1571 intel_fbc_cleanup_cfb(dev_priv);
1467 1572
1468 intel_power_domains_fini(dev_priv); 1573 intel_power_domains_fini(dev_priv);
@@ -1570,15 +1675,15 @@ const struct drm_ioctl_desc i915_ioctls[] = {
1570 DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW), 1675 DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
1571 DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0), 1676 DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
1572 DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW), 1677 DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
-1573 DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW),
+1678 DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
-1574 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW),
+1679 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
1575 DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW), 1680 DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
1576 DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW), 1681 DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
1577 DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 1682 DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1578 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW), 1683 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
1579 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW), 1684 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
1580 DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW), 1685 DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
-1581 DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_RENDER_ALLOW),
+1686 DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
1582 DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW), 1687 DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
1583 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW), 1688 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
1584 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW), 1689 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f313b4d8344f..3eb47fbcea73 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -35,11 +35,9 @@
35#include "i915_trace.h" 35#include "i915_trace.h"
36#include "intel_drv.h" 36#include "intel_drv.h"
37 37
38#include <linux/apple-gmux.h>
39#include <linux/console.h> 38#include <linux/console.h>
40#include <linux/module.h> 39#include <linux/module.h>
41#include <linux/pm_runtime.h> 40#include <linux/pm_runtime.h>
42#include <linux/vgaarb.h>
43#include <linux/vga_switcheroo.h> 41#include <linux/vga_switcheroo.h>
44#include <drm/drm_crtc_helper.h> 42#include <drm/drm_crtc_helper.h>
45 43
@@ -300,22 +298,26 @@ static const struct intel_device_info intel_haswell_m_info = {
300static const struct intel_device_info intel_broadwell_d_info = { 298static const struct intel_device_info intel_broadwell_d_info = {
301 BDW_FEATURES, 299 BDW_FEATURES,
302 .gen = 8, 300 .gen = 8,
301 .is_broadwell = 1,
303}; 302};
304 303
305static const struct intel_device_info intel_broadwell_m_info = { 304static const struct intel_device_info intel_broadwell_m_info = {
306 BDW_FEATURES, 305 BDW_FEATURES,
307 .gen = 8, .is_mobile = 1, 306 .gen = 8, .is_mobile = 1,
307 .is_broadwell = 1,
308}; 308};
309 309
310static const struct intel_device_info intel_broadwell_gt3d_info = { 310static const struct intel_device_info intel_broadwell_gt3d_info = {
311 BDW_FEATURES, 311 BDW_FEATURES,
312 .gen = 8, 312 .gen = 8,
313 .is_broadwell = 1,
313 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, 314 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
314}; 315};
315 316
316static const struct intel_device_info intel_broadwell_gt3m_info = { 317static const struct intel_device_info intel_broadwell_gt3m_info = {
317 BDW_FEATURES, 318 BDW_FEATURES,
318 .gen = 8, .is_mobile = 1, 319 .gen = 8, .is_mobile = 1,
320 .is_broadwell = 1,
319 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, 321 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
320}; 322};
321 323
@@ -353,6 +355,7 @@ static const struct intel_device_info intel_broxton_info = {
353 .has_ddi = 1, 355 .has_ddi = 1,
354 .has_fpga_dbg = 1, 356 .has_fpga_dbg = 1,
355 .has_fbc = 1, 357 .has_fbc = 1,
358 .has_pooled_eu = 0,
356 GEN_DEFAULT_PIPEOFFSETS, 359 GEN_DEFAULT_PIPEOFFSETS,
357 IVB_CURSOR_OFFSETS, 360 IVB_CURSOR_OFFSETS,
358 BDW_COLORS, 361 BDW_COLORS,
@@ -515,8 +518,10 @@ void intel_detect_pch(struct drm_device *dev)
515 } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) || 518 } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
516 (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) || 519 (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
517 ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) && 520 ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
-518 pch->subsystem_vendor == 0x1af4 &&
-519 pch->subsystem_device == 0x1100)) {
+521 pch->subsystem_vendor ==
+522 	PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
+523 pch->subsystem_device ==
+524 	PCI_SUBDEVICE_ID_QEMU)) {
520 dev_priv->pch_type = intel_virt_detect_pch(dev); 525 dev_priv->pch_type = intel_virt_detect_pch(dev);
521 } else 526 } else
522 continue; 527 continue;
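The magic numbers for QEMU's emulated PCH are replaced by named constants; their values are unchanged, as the removed side of the hunk shows:

	/* From <linux/pci_ids.h>; values match the literals removed above. */
	#define PCI_SUBVENDOR_ID_REDHAT_QUMRANET	0x1af4
	#define PCI_SUBDEVICE_ID_QEMU			0x1100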
@@ -530,9 +535,9 @@ void intel_detect_pch(struct drm_device *dev)
530 pci_dev_put(pch); 535 pci_dev_put(pch);
531} 536}
532 537
-533 bool i915_semaphore_is_enabled(struct drm_device *dev)
+538 bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv)
534{ 539{
-535 if (INTEL_INFO(dev)->gen < 6)
+540 if (INTEL_GEN(dev_priv) < 6)
536 return false; 541 return false;
537 542
538 if (i915.semaphores >= 0) 543 if (i915.semaphores >= 0)
@@ -542,13 +547,9 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
542 if (i915.enable_execlists) 547 if (i915.enable_execlists)
543 return false; 548 return false;
544 549
545 /* Until we get further testing... */
546 if (IS_GEN8(dev))
547 return false;
548
549#ifdef CONFIG_INTEL_IOMMU 550#ifdef CONFIG_INTEL_IOMMU
550 /* Enable semaphores on SNB when IO remapping is off */ 551 /* Enable semaphores on SNB when IO remapping is off */
551 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) 552 if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped)
552 return false; 553 return false;
553#endif 554#endif
554 555
@@ -610,7 +611,7 @@ static int i915_drm_suspend(struct drm_device *dev)
610 611
611 intel_guc_suspend(dev); 612 intel_guc_suspend(dev);
612 613
-613 intel_suspend_gt_powersave(dev);
+614 intel_suspend_gt_powersave(dev_priv);
614 615
615 intel_display_suspend(dev); 616 intel_display_suspend(dev);
616 617
@@ -628,10 +629,10 @@ static int i915_drm_suspend(struct drm_device *dev)
628 i915_save_state(dev); 629 i915_save_state(dev);
629 630
630 opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold; 631 opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
-631 intel_opregion_notify_adapter(dev, opregion_target_state);
+632 intel_opregion_notify_adapter(dev_priv, opregion_target_state);
632 633
-633 intel_uncore_forcewake_reset(dev, false);
+634 intel_uncore_forcewake_reset(dev_priv, false);
-634 intel_opregion_fini(dev);
+635 intel_opregion_unregister(dev_priv);
635 636
636 intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true); 637 intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
637 638
@@ -749,7 +750,7 @@ static int i915_drm_resume(struct drm_device *dev)
749 mutex_unlock(&dev->struct_mutex); 750 mutex_unlock(&dev->struct_mutex);
750 751
751 i915_restore_state(dev); 752 i915_restore_state(dev);
-752 intel_opregion_setup(dev);
+753 intel_opregion_setup(dev_priv);
753 754
754 intel_init_pch_refclk(dev); 755 intel_init_pch_refclk(dev);
755 drm_mode_config_reset(dev); 756 drm_mode_config_reset(dev);
@@ -777,7 +778,7 @@ static int i915_drm_resume(struct drm_device *dev)
777 778
778 spin_lock_irq(&dev_priv->irq_lock); 779 spin_lock_irq(&dev_priv->irq_lock);
779 if (dev_priv->display.hpd_irq_setup) 780 if (dev_priv->display.hpd_irq_setup)
-780 dev_priv->display.hpd_irq_setup(dev);
+781 dev_priv->display.hpd_irq_setup(dev_priv);
781 spin_unlock_irq(&dev_priv->irq_lock); 782 spin_unlock_irq(&dev_priv->irq_lock);
782 783
783 intel_dp_mst_resume(dev); 784 intel_dp_mst_resume(dev);
@@ -794,7 +795,7 @@ static int i915_drm_resume(struct drm_device *dev)
794 /* Config may have changed between suspend and resume */ 795 /* Config may have changed between suspend and resume */
795 drm_helper_hpd_irq_event(dev); 796 drm_helper_hpd_irq_event(dev);
796 797
-797 intel_opregion_init(dev);
+798 intel_opregion_register(dev_priv);
798 799
799 intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false); 800 intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
800 801
@@ -802,7 +803,7 @@ static int i915_drm_resume(struct drm_device *dev)
802 dev_priv->modeset_restore = MODESET_DONE; 803 dev_priv->modeset_restore = MODESET_DONE;
803 mutex_unlock(&dev_priv->modeset_restore_lock); 804 mutex_unlock(&dev_priv->modeset_restore_lock);
804 805
-805 intel_opregion_notify_adapter(dev, PCI_D0);
+806 intel_opregion_notify_adapter(dev_priv, PCI_D0);
806 807
807 drm_kms_helper_poll_enable(dev); 808 drm_kms_helper_poll_enable(dev);
808 809
@@ -870,9 +871,9 @@ static int i915_drm_resume_early(struct drm_device *dev)
870 DRM_ERROR("Resume prepare failed: %d, continuing anyway\n", 871 DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
871 ret); 872 ret);
872 873
-873 intel_uncore_early_sanitize(dev, true);
+874 intel_uncore_early_sanitize(dev_priv, true);
874 875
-875 if (IS_BROXTON(dev)) {
+876 if (IS_BROXTON(dev_priv)) {
876 if (!dev_priv->suspended_to_idle) 877 if (!dev_priv->suspended_to_idle)
877 gen9_sanitize_dc_state(dev_priv); 878 gen9_sanitize_dc_state(dev_priv);
878 bxt_disable_dc9(dev_priv); 879 bxt_disable_dc9(dev_priv);
@@ -880,7 +881,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
880 hsw_disable_pc8(dev_priv); 881 hsw_disable_pc8(dev_priv);
881 } 882 }
882 883
-883 intel_uncore_sanitize(dev);
+884 intel_uncore_sanitize(dev_priv);
884 885
885 if (IS_BROXTON(dev_priv) || 886 if (IS_BROXTON(dev_priv) ||
886 !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload)) 887 !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
@@ -923,14 +924,14 @@ int i915_resume_switcheroo(struct drm_device *dev)
923 * - re-init interrupt state 924 * - re-init interrupt state
924 * - re-init display 925 * - re-init display
925 */ 926 */
-926 int i915_reset(struct drm_device *dev)
+927 int i915_reset(struct drm_i915_private *dev_priv)
927{ 928{
-928 struct drm_i915_private *dev_priv = dev->dev_private;
+929 struct drm_device *dev = dev_priv->dev;
929 struct i915_gpu_error *error = &dev_priv->gpu_error; 930 struct i915_gpu_error *error = &dev_priv->gpu_error;
930 unsigned reset_counter; 931 unsigned reset_counter;
931 int ret; 932 int ret;
932 933
-933 intel_reset_gt_powersave(dev);
+934 intel_reset_gt_powersave(dev_priv);
934 935
935 mutex_lock(&dev->struct_mutex); 936 mutex_lock(&dev->struct_mutex);
936 937
@@ -946,7 +947,7 @@ int i915_reset(struct drm_device *dev)
946 947
947 i915_gem_reset(dev); 948 i915_gem_reset(dev);
948 949
-949 ret = intel_gpu_reset(dev, ALL_ENGINES);
+950 ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
950 951
951 /* Also reset the gpu hangman. */ 952 /* Also reset the gpu hangman. */
952 if (error->stop_rings != 0) { 953 if (error->stop_rings != 0) {
@@ -1001,7 +1002,7 @@ int i915_reset(struct drm_device *dev)
1001 * of re-init after reset. 1002 * of re-init after reset.
1002 */ 1003 */
1003 if (INTEL_INFO(dev)->gen > 5) 1004 if (INTEL_INFO(dev)->gen > 5)
-1004 intel_enable_gt_powersave(dev);
+1005 intel_enable_gt_powersave(dev_priv);
1005 1006
1006 return 0; 1007 return 0;
1007 1008
@@ -1030,13 +1031,7 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1030 if (PCI_FUNC(pdev->devfn)) 1031 if (PCI_FUNC(pdev->devfn))
1031 return -ENODEV; 1032 return -ENODEV;
1032 1033
-1033 /*
-1034  * apple-gmux is needed on dual GPU MacBook Pro
-1035  * to probe the panel if we're the inactive GPU.
-1036  */
-1037 if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
-1038     apple_gmux_present() && pdev != vga_default_device() &&
-1039     !vga_switcheroo_handler_flags())
+1034 if (vga_switcheroo_client_probe_defer(pdev))
1040 return -EPROBE_DEFER; 1035 return -EPROBE_DEFER;
1041 1036
1042 return drm_get_pci_dev(pdev, ent, &driver); 1037 return drm_get_pci_dev(pdev, ent, &driver);
@@ -1115,6 +1110,49 @@ static int i915_pm_resume(struct device *dev)
1115 return i915_drm_resume(drm_dev); 1110 return i915_drm_resume(drm_dev);
1116} 1111}
1117 1112
1113/* freeze: before creating the hibernation_image */
1114static int i915_pm_freeze(struct device *dev)
1115{
1116 return i915_pm_suspend(dev);
1117}
1118
1119static int i915_pm_freeze_late(struct device *dev)
1120{
1121 int ret;
1122
1123 ret = i915_pm_suspend_late(dev);
1124 if (ret)
1125 return ret;
1126
1127 ret = i915_gem_freeze_late(dev_to_i915(dev));
1128 if (ret)
1129 return ret;
1130
1131 return 0;
1132}
1133
1134/* thaw: called after creating the hibernation image, but before turning off. */
1135static int i915_pm_thaw_early(struct device *dev)
1136{
1137 return i915_pm_resume_early(dev);
1138}
1139
1140static int i915_pm_thaw(struct device *dev)
1141{
1142 return i915_pm_resume(dev);
1143}
1144
1145/* restore: called after loading the hibernation image. */
1146static int i915_pm_restore_early(struct device *dev)
1147{
1148 return i915_pm_resume_early(dev);
1149}
1150
1151static int i915_pm_restore(struct device *dev)
1152{
1153 return i915_pm_resume(dev);
1154}
1155
1118/* 1156/*
1119 * Save all Gunit registers that may be lost after a D3 and a subsequent 1157 * Save all Gunit registers that may be lost after a D3 and a subsequent
1120 * S0i[R123] transition. The list of registers needing a save/restore is 1158 * S0i[R123] transition. The list of registers needing a save/restore is
@@ -1478,7 +1516,7 @@ static int intel_runtime_suspend(struct device *device)
1478 struct drm_i915_private *dev_priv = dev->dev_private; 1516 struct drm_i915_private *dev_priv = dev->dev_private;
1479 int ret; 1517 int ret;
1480 1518
-1481 if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
+1519 if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
1482 return -ENODEV; 1520 return -ENODEV;
1483 1521
1484 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev))) 1522 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
@@ -1517,7 +1555,7 @@ static int intel_runtime_suspend(struct device *device)
1517 1555
1518 intel_guc_suspend(dev); 1556 intel_guc_suspend(dev);
1519 1557
-1520 intel_suspend_gt_powersave(dev);
+1558 intel_suspend_gt_powersave(dev_priv);
1521 intel_runtime_pm_disable_interrupts(dev_priv); 1559 intel_runtime_pm_disable_interrupts(dev_priv);
1522 1560
1523 ret = 0; 1561 ret = 0;
@@ -1539,7 +1577,7 @@ static int intel_runtime_suspend(struct device *device)
1539 return ret; 1577 return ret;
1540 } 1578 }
1541 1579
-1542 intel_uncore_forcewake_reset(dev, false);
+1580 intel_uncore_forcewake_reset(dev_priv, false);
1543 1581
1544 enable_rpm_wakeref_asserts(dev_priv); 1582 enable_rpm_wakeref_asserts(dev_priv);
1545 WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); 1583 WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
@@ -1553,14 +1591,14 @@ static int intel_runtime_suspend(struct device *device)
1553 * FIXME: We really should find a document that references the arguments 1591 * FIXME: We really should find a document that references the arguments
1554 * used below! 1592 * used below!
1555 */ 1593 */
-1556 if (IS_BROADWELL(dev)) {
+1594 if (IS_BROADWELL(dev_priv)) {
1557 /* 1595 /*
1558 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop 1596 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
1559 * being detected, and the call we do at intel_runtime_resume() 1597 * being detected, and the call we do at intel_runtime_resume()
1560 * won't be able to restore them. Since PCI_D3hot matches the 1598 * won't be able to restore them. Since PCI_D3hot matches the
1561 * actual specification and appears to be working, use it. 1599 * actual specification and appears to be working, use it.
1562 */ 1600 */
-1563 intel_opregion_notify_adapter(dev, PCI_D3hot);
+1601 intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
1564 } else { 1602 } else {
1565 /* 1603 /*
1566 * current versions of firmware which depend on this opregion 1604 * current versions of firmware which depend on this opregion
@@ -1569,7 +1607,7 @@ static int intel_runtime_suspend(struct device *device)
1569 * to distinguish it from notifications that might be sent via 1607 * to distinguish it from notifications that might be sent via
1570 * the suspend path. 1608 * the suspend path.
1571 */ 1609 */
-1572 intel_opregion_notify_adapter(dev, PCI_D1);
+1610 intel_opregion_notify_adapter(dev_priv, PCI_D1);
1573 } 1611 }
1574 1612
1575 assert_forcewakes_inactive(dev_priv); 1613 assert_forcewakes_inactive(dev_priv);
@@ -1593,7 +1631,7 @@ static int intel_runtime_resume(struct device *device)
1593 WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); 1631 WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
1594 disable_rpm_wakeref_asserts(dev_priv); 1632 disable_rpm_wakeref_asserts(dev_priv);
1595 1633
-1596 intel_opregion_notify_adapter(dev, PCI_D0);
+1634 intel_opregion_notify_adapter(dev_priv, PCI_D0);
1597 dev_priv->pm.suspended = false; 1635 dev_priv->pm.suspended = false;
1598 if (intel_uncore_unclaimed_mmio(dev_priv)) 1636 if (intel_uncore_unclaimed_mmio(dev_priv))
1599 DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n"); 1637 DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
@@ -1620,7 +1658,7 @@ static int intel_runtime_resume(struct device *device)
1620 * we can do is to hope that things will still work (and disable RPM). 1658 * we can do is to hope that things will still work (and disable RPM).
1621 */ 1659 */
1622 i915_gem_init_swizzling(dev); 1660 i915_gem_init_swizzling(dev);
-1623 gen6_update_ring_freq(dev);
+1661 gen6_update_ring_freq(dev_priv);
1624 1662
1625 intel_runtime_pm_enable_interrupts(dev_priv); 1663 intel_runtime_pm_enable_interrupts(dev_priv);
1626 1664
@@ -1632,7 +1670,7 @@ static int intel_runtime_resume(struct device *device)
1632 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) 1670 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
1633 intel_hpd_init(dev_priv); 1671 intel_hpd_init(dev_priv);
1634 1672
-1635 intel_enable_gt_powersave(dev);
+1673 intel_enable_gt_powersave(dev_priv);
1636 1674
1637 enable_rpm_wakeref_asserts(dev_priv); 1675 enable_rpm_wakeref_asserts(dev_priv);
1638 1676
@@ -1669,14 +1707,14 @@ static const struct dev_pm_ops i915_pm_ops = {
1669 * @restore, @restore_early : called after rebooting and restoring the 1707 * @restore, @restore_early : called after rebooting and restoring the
1670 * hibernation image [PMSG_RESTORE] 1708 * hibernation image [PMSG_RESTORE]
1671 */ 1709 */
-1672 .freeze = i915_pm_suspend,
+1710 .freeze = i915_pm_freeze,
-1673 .freeze_late = i915_pm_suspend_late,
+1711 .freeze_late = i915_pm_freeze_late,
-1674 .thaw_early = i915_pm_resume_early,
+1712 .thaw_early = i915_pm_thaw_early,
-1675 .thaw = i915_pm_resume,
+1713 .thaw = i915_pm_thaw,
1676 .poweroff = i915_pm_suspend, 1714 .poweroff = i915_pm_suspend,
1677 .poweroff_late = i915_pm_poweroff_late, 1715 .poweroff_late = i915_pm_poweroff_late,
-1678 .restore_early = i915_pm_resume_early,
+1716 .restore_early = i915_pm_restore_early,
-1679 .restore = i915_pm_resume,
+1717 .restore = i915_pm_restore,
1680 1718
1681 /* S0ix (via runtime suspend) event handlers */ 1719 /* S0ix (via runtime suspend) event handlers */
1682 .runtime_suspend = intel_runtime_suspend, 1720 .runtime_suspend = intel_runtime_suspend,
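With the new wrappers, every hibernation phase gets a distinctly named callback even where it currently just forwards to the suspend/resume path. A condensed sketch of the resulting table, using only names that appear in these hunks:

	static const struct dev_pm_ops pm_ops_sketch = {
		/* hibernate: create the image */
		.freeze		= i915_pm_freeze,
		.freeze_late	= i915_pm_freeze_late,	/* also runs i915_gem_freeze_late() */
		.thaw_early	= i915_pm_thaw_early,
		.thaw		= i915_pm_thaw,
		/* hibernate: boot the new kernel and load the image */
		.restore_early	= i915_pm_restore_early,
		.restore	= i915_pm_restore,
	};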
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 5faacc6e548d..24a86c64d22e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -47,6 +47,7 @@
47#include <drm/intel-gtt.h> 47#include <drm/intel-gtt.h>
48#include <drm/drm_legacy.h> /* for struct drm_dma_handle */ 48#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
49#include <drm/drm_gem.h> 49#include <drm/drm_gem.h>
50#include <drm/drm_auth.h>
50 51
51#include "i915_params.h" 52#include "i915_params.h"
52#include "i915_reg.h" 53#include "i915_reg.h"
@@ -61,12 +62,14 @@
61#include "i915_gem_gtt.h" 62#include "i915_gem_gtt.h"
62#include "i915_gem_render_state.h" 63#include "i915_gem_render_state.h"
63 64
65#include "intel_gvt.h"
66
64/* General customization: 67/* General customization:
65 */ 68 */
66 69
67#define DRIVER_NAME "i915" 70#define DRIVER_NAME "i915"
68#define DRIVER_DESC "Intel Graphics" 71#define DRIVER_DESC "Intel Graphics"
-69 #define DRIVER_DATE "20160425"
+72 #define DRIVER_DATE "20160620"
70 73
71#undef WARN_ON 74#undef WARN_ON
72/* Many gcc seem to no see through this and fall over :( */ 75/* Many gcc seem to no see through this and fall over :( */
@@ -324,6 +327,12 @@ struct i915_hotplug {
324 &dev->mode_config.plane_list, \ 327 &dev->mode_config.plane_list, \
325 base.head) 328 base.head)
326 329
330#define for_each_intel_plane_mask(dev, intel_plane, plane_mask) \
331 list_for_each_entry(intel_plane, &dev->mode_config.plane_list, \
332 base.head) \
333 for_each_if ((plane_mask) & \
334 (1 << drm_plane_index(&intel_plane->base)))
335
327#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \ 336#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
328 list_for_each_entry(intel_plane, \ 337 list_for_each_entry(intel_plane, \
329 &(dev)->mode_config.plane_list, \ 338 &(dev)->mode_config.plane_list, \
@@ -333,6 +342,10 @@ struct i915_hotplug {
333#define for_each_intel_crtc(dev, intel_crtc) \ 342#define for_each_intel_crtc(dev, intel_crtc) \
334 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) 343 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
335 344
345#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \
346 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) \
347 for_each_if ((crtc_mask) & (1 << drm_crtc_index(&intel_crtc->base)))
348
336#define for_each_intel_encoder(dev, intel_encoder) \ 349#define for_each_intel_encoder(dev, intel_encoder) \
337 list_for_each_entry(intel_encoder, \ 350 list_for_each_entry(intel_encoder, \
338 &(dev)->mode_config.encoder_list, \ 351 &(dev)->mode_config.encoder_list, \
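A hypothetical caller of the new mask iterators added above, walking only the CRTCs whose index bit is set (do_something() is a placeholder, not a driver function):

	struct intel_crtc *crtc;
	unsigned int crtc_mask = BIT(0) | BIT(2);	/* e.g. pipes A and C */

	for_each_intel_crtc_mask(dev, crtc, crtc_mask)
		do_something(crtc);			/* placeholder */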
@@ -588,6 +601,7 @@ struct drm_i915_display_funcs {
588 struct intel_crtc_state *newstate); 601 struct intel_crtc_state *newstate);
589 void (*initial_watermarks)(struct intel_crtc_state *cstate); 602 void (*initial_watermarks)(struct intel_crtc_state *cstate);
590 void (*optimize_watermarks)(struct intel_crtc_state *cstate); 603 void (*optimize_watermarks)(struct intel_crtc_state *cstate);
604 int (*compute_global_watermarks)(struct drm_atomic_state *state);
591 void (*update_wm)(struct drm_crtc *crtc); 605 void (*update_wm)(struct drm_crtc *crtc);
592 int (*modeset_calc_cdclk)(struct drm_atomic_state *state); 606 int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
593 void (*modeset_commit_cdclk)(struct drm_atomic_state *state); 607 void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
@@ -612,7 +626,7 @@ struct drm_i915_display_funcs {
612 struct drm_i915_gem_object *obj, 626 struct drm_i915_gem_object *obj,
613 struct drm_i915_gem_request *req, 627 struct drm_i915_gem_request *req,
614 uint32_t flags); 628 uint32_t flags);
-615 void (*hpd_irq_setup)(struct drm_device *dev);
+629 void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
616 /* clock updates for mode set */ 630 /* clock updates for mode set */
617 /* cursor updates */ 631 /* cursor updates */
618 /* render clock increase/decrease */ 632 /* render clock increase/decrease */
@@ -735,6 +749,7 @@ struct intel_csr {
735 func(is_valleyview) sep \ 749 func(is_valleyview) sep \
736 func(is_cherryview) sep \ 750 func(is_cherryview) sep \
737 func(is_haswell) sep \ 751 func(is_haswell) sep \
752 func(is_broadwell) sep \
738 func(is_skylake) sep \ 753 func(is_skylake) sep \
739 func(is_broxton) sep \ 754 func(is_broxton) sep \
740 func(is_kabylake) sep \ 755 func(is_kabylake) sep \
@@ -749,7 +764,8 @@ struct intel_csr {
749 func(has_llc) sep \ 764 func(has_llc) sep \
750 func(has_snoop) sep \ 765 func(has_snoop) sep \
751 func(has_ddi) sep \ 766 func(has_ddi) sep \
-752 func(has_fpga_dbg)
+767 func(has_fpga_dbg) sep \
+768 func(has_pooled_eu)
753 769
754#define DEFINE_FLAG(name) u8 name:1 770#define DEFINE_FLAG(name) u8 name:1
755#define SEP_SEMICOLON ; 771#define SEP_SEMICOLON ;
@@ -757,9 +773,10 @@ struct intel_csr {
757struct intel_device_info { 773struct intel_device_info {
758 u32 display_mmio_offset; 774 u32 display_mmio_offset;
759 u16 device_id; 775 u16 device_id;
-760 u8 num_pipes:3;
+776 u8 num_pipes;
761 u8 num_sprites[I915_MAX_PIPES]; 777 u8 num_sprites[I915_MAX_PIPES];
762 u8 gen; 778 u8 gen;
779 u16 gen_mask;
763 u8 ring_mask; /* Rings supported by the HW */ 780 u8 ring_mask; /* Rings supported by the HW */
764 DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON); 781 DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
765 /* Register offsets for the various display pipes and transcoders */ 782 /* Register offsets for the various display pipes and transcoders */
@@ -774,6 +791,7 @@ struct intel_device_info {
774 u8 subslice_per_slice; 791 u8 subslice_per_slice;
775 u8 eu_total; 792 u8 eu_total;
776 u8 eu_per_subslice; 793 u8 eu_per_subslice;
794 u8 min_eu_in_pool;
777 /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */ 795 /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
778 u8 subslice_7eu[3]; 796 u8 subslice_7eu[3];
779 u8 has_slice_pg:1; 797 u8 has_slice_pg:1;
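DEV_INFO_FOR_EACH_FLAG is an X-macro: expanding it with DEFINE_FLAG and SEP_SEMICOLON (defined just below the list) stamps out one single-bit field per entry, so the two additions in this hunk become:

	u8 is_broadwell:1;
	u8 has_pooled_eu:1;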
@@ -821,9 +839,8 @@ struct i915_ctx_hang_stats {
821/* This must match up with the value previously used for execbuf2.rsvd1. */ 839/* This must match up with the value previously used for execbuf2.rsvd1. */
822#define DEFAULT_CONTEXT_HANDLE 0 840#define DEFAULT_CONTEXT_HANDLE 0
823 841
824#define CONTEXT_NO_ZEROMAP (1<<0)
825/** 842/**
-826  * struct intel_context - as the name implies, represents a context.
+843  * struct i915_gem_context - as the name implies, represents a context.
827 * @ref: reference count. 844 * @ref: reference count.
828 * @user_handle: userspace tracking identity for this context. 845 * @user_handle: userspace tracking identity for this context.
829 * @remap_slice: l3 row remapping information. 846 * @remap_slice: l3 row remapping information.
@@ -841,33 +858,37 @@ struct i915_ctx_hang_stats {
841 * Contexts are memory images used by the hardware to store copies of their 858 * Contexts are memory images used by the hardware to store copies of their
842 * internal state. 859 * internal state.
843 */ 860 */
-844 struct intel_context {
+861 struct i915_gem_context {
845 struct kref ref; 862 struct kref ref;
846 int user_handle;
847 uint8_t remap_slice;
848 struct drm_i915_private *i915; 863 struct drm_i915_private *i915;
849 int flags;
850 struct drm_i915_file_private *file_priv; 864 struct drm_i915_file_private *file_priv;
851 struct i915_ctx_hang_stats hang_stats;
852 struct i915_hw_ppgtt *ppgtt; 865 struct i915_hw_ppgtt *ppgtt;
853 866
-854 /* Legacy ring buffer submission */
+867 struct i915_ctx_hang_stats hang_stats;
855 struct {
856 struct drm_i915_gem_object *rcs_state;
857 bool initialized;
858 } legacy_hw_ctx;
859 868
860 /* Execlists */ 869 /* Unique identifier for this context, used by the hw for tracking */
861 struct { 870 unsigned long flags;
871 unsigned hw_id;
872 u32 user_handle;
873#define CONTEXT_NO_ZEROMAP (1<<0)
874
875 struct intel_context {
862 struct drm_i915_gem_object *state; 876 struct drm_i915_gem_object *state;
863 struct intel_ringbuffer *ringbuf; 877 struct intel_ringbuffer *ringbuf;
864 int pin_count;
865 struct i915_vma *lrc_vma; 878 struct i915_vma *lrc_vma;
866 u64 lrc_desc;
867 uint32_t *lrc_reg_state; 879 uint32_t *lrc_reg_state;
880 u64 lrc_desc;
881 int pin_count;
882 bool initialised;
868 } engine[I915_NUM_ENGINES]; 883 } engine[I915_NUM_ENGINES];
884 u32 ring_size;
885 u32 desc_template;
886 struct atomic_notifier_head status_notifier;
887 bool execlists_force_single_submission;
869 888
870 struct list_head link; 889 struct list_head link;
890
891 u8 remap_slice;
871}; 892};
872 893
873enum fb_op_origin { 894enum fb_op_origin {
@@ -1115,6 +1136,8 @@ struct intel_gen6_power_mgmt {
1115 bool interrupts_enabled; 1136 bool interrupts_enabled;
1116 u32 pm_iir; 1137 u32 pm_iir;
1117 1138
1139 u32 pm_intr_keep;
1140
1118 /* Frequencies are stored in potentially platform dependent multiples. 1141 /* Frequencies are stored in potentially platform dependent multiples.
1119 * In other words, *_freq needs to be multiplied by X to be interesting. 1142 * In other words, *_freq needs to be multiplied by X to be interesting.
1120 * Soft limits are those which are used for the dynamic reclocking done 1143 * Soft limits are those which are used for the dynamic reclocking done
@@ -1488,6 +1511,7 @@ struct intel_vbt_data {
1488 bool present; 1511 bool present;
1489 bool active_low_pwm; 1512 bool active_low_pwm;
1490 u8 min_brightness; /* min_brightness/255 of max */ 1513 u8 min_brightness; /* min_brightness/255 of max */
1514 enum intel_backlight_type type;
1491 } backlight; 1515 } backlight;
1492 1516
1493 /* MIPI DSI */ 1517 /* MIPI DSI */
@@ -1580,7 +1604,7 @@ struct skl_ddb_allocation {
1580}; 1604};
1581 1605
1582struct skl_wm_values { 1606struct skl_wm_values {
-1583 bool dirty[I915_MAX_PIPES];
+1607 unsigned dirty_pipes;
1584 struct skl_ddb_allocation ddb; 1608 struct skl_ddb_allocation ddb;
1585 uint32_t wm_linetime[I915_MAX_PIPES]; 1609 uint32_t wm_linetime[I915_MAX_PIPES];
1586 uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8]; 1610 uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8];
@@ -1697,7 +1721,7 @@ struct i915_execbuffer_params {
1697 uint64_t batch_obj_vm_offset; 1721 uint64_t batch_obj_vm_offset;
1698 struct intel_engine_cs *engine; 1722 struct intel_engine_cs *engine;
1699 struct drm_i915_gem_object *batch_obj; 1723 struct drm_i915_gem_object *batch_obj;
-1700 struct intel_context *ctx;
+1724 struct i915_gem_context *ctx;
1701 struct drm_i915_gem_request *request; 1725 struct drm_i915_gem_request *request;
1702}; 1726};
1703 1727
@@ -1724,6 +1748,8 @@ struct drm_i915_private {
1724 1748
1725 struct i915_virtual_gpu vgpu; 1749 struct i915_virtual_gpu vgpu;
1726 1750
1751 struct intel_gvt gvt;
1752
1727 struct intel_guc guc; 1753 struct intel_guc guc;
1728 1754
1729 struct intel_csr csr; 1755 struct intel_csr csr;
@@ -1747,6 +1773,7 @@ struct drm_i915_private {
1747 wait_queue_head_t gmbus_wait_queue; 1773 wait_queue_head_t gmbus_wait_queue;
1748 1774
1749 struct pci_dev *bridge_dev; 1775 struct pci_dev *bridge_dev;
1776 struct i915_gem_context *kernel_context;
1750 struct intel_engine_cs engine[I915_NUM_ENGINES]; 1777 struct intel_engine_cs engine[I915_NUM_ENGINES];
1751 struct drm_i915_gem_object *semaphore_obj; 1778 struct drm_i915_gem_object *semaphore_obj;
1752 uint32_t last_seqno, next_seqno; 1779 uint32_t last_seqno, next_seqno;
@@ -1802,13 +1829,17 @@ struct drm_i915_private {
1802 int num_fence_regs; /* 8 on pre-965, 16 otherwise */ 1829 int num_fence_regs; /* 8 on pre-965, 16 otherwise */
1803 1830
1804 unsigned int fsb_freq, mem_freq, is_ddr3; 1831 unsigned int fsb_freq, mem_freq, is_ddr3;
-1805 unsigned int skl_boot_cdclk;
+1832 unsigned int skl_preferred_vco_freq;
1806 unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq; 1833 unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq;
1807 unsigned int max_dotclk_freq; 1834 unsigned int max_dotclk_freq;
1808 unsigned int rawclk_freq; 1835 unsigned int rawclk_freq;
1809 unsigned int hpll_freq; 1836 unsigned int hpll_freq;
1810 unsigned int czclk_freq; 1837 unsigned int czclk_freq;
1811 1838
1839 struct {
1840 unsigned int vco, ref;
1841 } cdclk_pll;
1842
1812 /** 1843 /**
1813 * wq - Driver workqueue for GEM. 1844 * wq - Driver workqueue for GEM.
1814 * 1845 *
@@ -1838,6 +1869,13 @@ struct drm_i915_private {
1838 DECLARE_HASHTABLE(mm_structs, 7); 1869 DECLARE_HASHTABLE(mm_structs, 7);
1839 struct mutex mm_lock; 1870 struct mutex mm_lock;
1840 1871
1872 /* The hw wants to have a stable context identifier for the lifetime
1873 * of the context (for OA, PASID, faults, etc). This is limited
1874 * in execlists to 21 bits.
1875 */
1876 struct ida context_hw_ida;
1877#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
1878
1841 /* Kernel Modesetting */ 1879 /* Kernel Modesetting */
1842 1880
1843 struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES]; 1881 struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
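A sketch of how the new context_hw_ida would hand out the 21-bit hardware IDs, assuming the ida_simple_get() API of this kernel generation; the surrounding allocation function is hypothetical:

	int hw_id = ida_simple_get(&dev_priv->context_hw_ida,
				   0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
	if (hw_id < 0)
		return hw_id;		/* all 2^21 execlists IDs are in use */
	ctx->hw_id = hw_id;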
@@ -1950,9 +1988,6 @@ struct drm_i915_private {
1950 */ 1988 */
1951 uint16_t skl_latency[8]; 1989 uint16_t skl_latency[8];
1952 1990
1953 /* Committed wm config */
1954 struct intel_wm_config config;
1955
1956 /* 1991 /*
1957 * The skl_wm_values structure is a bit too big for stack 1992 * The skl_wm_values structure is a bit too big for stack
1958 * allocation, so we keep the staging struct where we store 1993 * allocation, so we keep the staging struct where we store
@@ -1975,6 +2010,13 @@ struct drm_i915_private {
1975 * cstate->wm.need_postvbl_update. 2010 * cstate->wm.need_postvbl_update.
1976 */ 2011 */
1977 struct mutex wm_mutex; 2012 struct mutex wm_mutex;
2013
2014 /*
2015 * Set during HW readout of watermarks/DDB. Some platforms
2016 * need to know when we're still using BIOS-provided values
2017 * (which we don't fully trust).
2018 */
2019 bool distrust_bios_wm;
1978 } wm; 2020 } wm;
1979 2021
1980 struct i915_runtime_pm pm; 2022 struct i915_runtime_pm pm;
@@ -1989,8 +2031,6 @@ struct drm_i915_private {
1989 void (*stop_engine)(struct intel_engine_cs *engine); 2031 void (*stop_engine)(struct intel_engine_cs *engine);
1990 } gt; 2032 } gt;
1991 2033
1992 struct intel_context *kernel_context;
1993
1994 /* perform PHY state sanity checks? */ 2034 /* perform PHY state sanity checks? */
1995 bool chv_phy_assert[2]; 2035 bool chv_phy_assert[2];
1996 2036
@@ -2227,9 +2267,75 @@ struct drm_i915_gem_object {
2227}; 2267};
2228#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) 2268#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
2229 2269
-2230 void i915_gem_track_fb(struct drm_i915_gem_object *old,
-2231 		       struct drm_i915_gem_object *new,
-2232 		       unsigned frontbuffer_bits);
+2270 /*
+2271  * Optimised SGL iterator for GEM objects
+2272  */
2273static __always_inline struct sgt_iter {
2274 struct scatterlist *sgp;
2275 union {
2276 unsigned long pfn;
2277 dma_addr_t dma;
2278 };
2279 unsigned int curr;
2280 unsigned int max;
2281} __sgt_iter(struct scatterlist *sgl, bool dma) {
2282 struct sgt_iter s = { .sgp = sgl };
2283
2284 if (s.sgp) {
2285 s.max = s.curr = s.sgp->offset;
2286 s.max += s.sgp->length;
2287 if (dma)
2288 s.dma = sg_dma_address(s.sgp);
2289 else
2290 s.pfn = page_to_pfn(sg_page(s.sgp));
2291 }
2292
2293 return s;
2294}
2295
2296/**
2297 * __sg_next - return the next scatterlist entry in a list
2298 * @sg: The current sg entry
2299 *
2300 * Description:
2301 * If the entry is the last, return NULL; otherwise, step to the next
2302 * element in the array (@sg@+1). If that's a chain pointer, follow it;
2303 * otherwise just return the pointer to the current element.
2304 **/
2305static inline struct scatterlist *__sg_next(struct scatterlist *sg)
2306{
2307#ifdef CONFIG_DEBUG_SG
2308 BUG_ON(sg->sg_magic != SG_MAGIC);
2309#endif
2310 return sg_is_last(sg) ? NULL :
2311 likely(!sg_is_chain(++sg)) ? sg :
2312 sg_chain_ptr(sg);
2313}
2314
2315/**
2316 * for_each_sgt_dma - iterate over the DMA addresses of the given sg_table
2317 * @__dmap: DMA address (output)
2318 * @__iter: 'struct sgt_iter' (iterator state, internal)
2319 * @__sgt: sg_table to iterate over (input)
2320 */
2321#define for_each_sgt_dma(__dmap, __iter, __sgt) \
2322 for ((__iter) = __sgt_iter((__sgt)->sgl, true); \
2323 ((__dmap) = (__iter).dma + (__iter).curr); \
2324 (((__iter).curr += PAGE_SIZE) < (__iter).max) || \
2325 ((__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0))
2326
2327/**
2328 * for_each_sgt_page - iterate over the pages of the given sg_table
2329 * @__pp: page pointer (output)
2330 * @__iter: 'struct sgt_iter' (iterator state, internal)
2331 * @__sgt: sg_table to iterate over (input)
2332 */
2333#define for_each_sgt_page(__pp, __iter, __sgt) \
2334 for ((__iter) = __sgt_iter((__sgt)->sgl, false); \
2335 ((__pp) = (__iter).pfn == 0 ? NULL : \
2336 pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
2337 (((__iter).curr += PAGE_SIZE) < (__iter).max) || \
2338 ((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0))
2233 2339
2234/** 2340/**
2235 * Request queue structure. 2341 * Request queue structure.
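Hypothetical usage of the new page iterator from the hunk above: visiting every backing page of an object's sg_table without the per-page overhead of the generic scatterlist helpers (obj->pages is the object's sg_table in this era of the driver):

	struct sgt_iter sgt_iter;
	struct page *page;

	for_each_sgt_page(page, sgt_iter, obj->pages)
		clear_highpage(page);	/* e.g. scrub each backing page */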
@@ -2278,6 +2384,9 @@ struct drm_i915_gem_request {
2278 /** Position in the ringbuffer of the end of the whole request */ 2384 /** Position in the ringbuffer of the end of the whole request */
2279 u32 tail; 2385 u32 tail;
2280 2386
2387 /** Preallocate space in the ringbuffer for the emitting the request */
2388 u32 reserved_space;
2389
2281 /** 2390 /**
2282 * Context and ring buffer related to this request 2391 * Context and ring buffer related to this request
2283 * Contexts are refcounted, so when this request is associated with a 2392 * Contexts are refcounted, so when this request is associated with a
@@ -2288,9 +2397,20 @@ struct drm_i915_gem_request {
2288 * i915_gem_request_free() will then decrement the refcount on the 2397 * i915_gem_request_free() will then decrement the refcount on the
2289 * context. 2398 * context.
2290 */ 2399 */
-2291 struct intel_context *ctx;
+2400 struct i915_gem_context *ctx;
2292 struct intel_ringbuffer *ringbuf; 2401 struct intel_ringbuffer *ringbuf;
2293 2402
2403 /**
2404 * Context related to the previous request.
2405 * As the contexts are accessed by the hardware until the switch is
2406 * completed to a new context, the hardware may still be writing
2407 * to the context object after the breadcrumb is visible. We must
2408 * not unpin/unbind/prune that object whilst still active and so
2409 * we keep the previous context pinned until the following (this)
2410 * request is retired.
2411 */
2412 struct i915_gem_context *previous_context;
2413
2294 /** Batch buffer related to this request if any (used for 2414 /** Batch buffer related to this request if any (used for
2295 error state dump only) */ 2415 error state dump only) */
2296 struct drm_i915_gem_object *batch_obj; 2416 struct drm_i915_gem_object *batch_obj;
@@ -2327,11 +2447,13 @@ struct drm_i915_gem_request {
2327 /** Execlists no. of times this request has been sent to the ELSP */ 2447 /** Execlists no. of times this request has been sent to the ELSP */
2328 int elsp_submitted; 2448 int elsp_submitted;
2329 2449
2450 /** Execlists context hardware id. */
2451 unsigned ctx_hw_id;
2330}; 2452};
2331 2453
2332struct drm_i915_gem_request * __must_check 2454struct drm_i915_gem_request * __must_check
2333i915_gem_request_alloc(struct intel_engine_cs *engine, 2455i915_gem_request_alloc(struct intel_engine_cs *engine,
-2334 		       struct intel_context *ctx);
+2456 		       struct i915_gem_context *ctx);
2335void i915_gem_request_free(struct kref *req_ref); 2457void i915_gem_request_free(struct kref *req_ref);
2336int i915_gem_request_add_to_client(struct drm_i915_gem_request *req, 2458int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
2337 struct drm_file *file); 2459 struct drm_file *file);
@@ -2359,23 +2481,9 @@ i915_gem_request_reference(struct drm_i915_gem_request *req)
2359static inline void 2481static inline void
2360i915_gem_request_unreference(struct drm_i915_gem_request *req) 2482i915_gem_request_unreference(struct drm_i915_gem_request *req)
2361{ 2483{
2362 WARN_ON(!mutex_is_locked(&req->engine->dev->struct_mutex));
2363 kref_put(&req->ref, i915_gem_request_free); 2484 kref_put(&req->ref, i915_gem_request_free);
2364} 2485}
2365 2486
2366static inline void
2367i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req)
2368{
2369 struct drm_device *dev;
2370
2371 if (!req)
2372 return;
2373
2374 dev = req->engine->dev;
2375 if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex))
2376 mutex_unlock(&dev->struct_mutex);
2377}
2378
2379static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst, 2487static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
2380 struct drm_i915_gem_request *src) 2488 struct drm_i915_gem_request *src)
2381{ 2489{
@@ -2503,9 +2611,29 @@ struct drm_i915_cmd_table {
2503#define INTEL_INFO(p) (&__I915__(p)->info) 2611#define INTEL_INFO(p) (&__I915__(p)->info)
2504#define INTEL_GEN(p) (INTEL_INFO(p)->gen) 2612#define INTEL_GEN(p) (INTEL_INFO(p)->gen)
2505#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id) 2613#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
2506#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision)
2507 2614
2508#define REVID_FOREVER 0xff 2615#define REVID_FOREVER 0xff
2616#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision)
2617
2618#define GEN_FOREVER (0)
2619/*
2620 * Returns true if Gen is in inclusive range [Start, End].
2621 *
2622 * Use GEN_FOREVER for unbound start and or end.
2623 */
2624#define IS_GEN(p, s, e) ({ \
2625 unsigned int __s = (s), __e = (e); \
2626 BUILD_BUG_ON(!__builtin_constant_p(s)); \
2627 BUILD_BUG_ON(!__builtin_constant_p(e)); \
2628 if ((__s) != GEN_FOREVER) \
2629 __s = (s) - 1; \
2630 if ((__e) == GEN_FOREVER) \
2631 __e = BITS_PER_LONG - 1; \
2632 else \
2633 __e = (e) - 1; \
2634 !!(INTEL_INFO(p)->gen_mask & GENMASK((__e), (__s))); \
2635})
2636
2509/* 2637/*
2510 * Return true if revision is in range [since,until] inclusive. 2638 * Return true if revision is in range [since,until] inclusive.
2511 * 2639 *
@@ -2538,7 +2666,7 @@ struct drm_i915_cmd_table {
2538#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) 2666#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
2539#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_cherryview) 2667#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_cherryview)
2540#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) 2668#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
-2541 #define IS_BROADWELL(dev)	(!INTEL_INFO(dev)->is_cherryview && IS_GEN8(dev))
+2669 #define IS_BROADWELL(dev)	(INTEL_INFO(dev)->is_broadwell)
2542#define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake) 2670#define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake)
2543#define IS_BROXTON(dev) (INTEL_INFO(dev)->is_broxton) 2671#define IS_BROXTON(dev) (INTEL_INFO(dev)->is_broxton)
2544#define IS_KABYLAKE(dev) (INTEL_INFO(dev)->is_kabylake) 2672#define IS_KABYLAKE(dev) (INTEL_INFO(dev)->is_kabylake)
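A worked expansion of the new range check defined earlier in this file: with s = 6 and e = 8, __s becomes 5 and __e becomes 7, so the test reduces to a single AND against GENMASK(7, 5) == 0xe0. Any device whose gen_mask has BIT(gen - 1) set for gen 6, 7 or 8 matches:

	/* gen 6 device: gen_mask == BIT(5) == 0x20			*/
	/* 0x20 & GENMASK(7, 5) (== 0xe0) != 0 -> the check is true	*/
	if (IS_GEN(dev_priv, 6, 8))
		/* runs on SNB through BDW */;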
@@ -2600,20 +2728,29 @@ struct drm_i915_cmd_table {
2600 2728
2601#define IS_BXT_REVID(p, since, until) (IS_BROXTON(p) && IS_REVID(p, since, until)) 2729#define IS_BXT_REVID(p, since, until) (IS_BROXTON(p) && IS_REVID(p, since, until))
2602 2730
2731#define KBL_REVID_A0 0x0
2732#define KBL_REVID_B0 0x1
2733#define KBL_REVID_C0 0x2
2734#define KBL_REVID_D0 0x3
2735#define KBL_REVID_E0 0x4
2736
2737#define IS_KBL_REVID(p, since, until) \
2738 (IS_KABYLAKE(p) && IS_REVID(p, since, until))
2739
2603/* 2740/*
2604 * The genX designation typically refers to the render engine, so render 2741 * The genX designation typically refers to the render engine, so render
2605 * capability related checks should use IS_GEN, while display and other checks 2742 * capability related checks should use IS_GEN, while display and other checks
2606 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular 2743 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
2607 * chips, etc.). 2744 * chips, etc.).
2608 */ 2745 */
-2609 #define IS_GEN2(dev)	(INTEL_INFO(dev)->gen == 2)
+2746 #define IS_GEN2(dev)	(INTEL_INFO(dev)->gen_mask & BIT(1))
-2610 #define IS_GEN3(dev)	(INTEL_INFO(dev)->gen == 3)
+2747 #define IS_GEN3(dev)	(INTEL_INFO(dev)->gen_mask & BIT(2))
-2611 #define IS_GEN4(dev)	(INTEL_INFO(dev)->gen == 4)
+2748 #define IS_GEN4(dev)	(INTEL_INFO(dev)->gen_mask & BIT(3))
-2612 #define IS_GEN5(dev)	(INTEL_INFO(dev)->gen == 5)
+2749 #define IS_GEN5(dev)	(INTEL_INFO(dev)->gen_mask & BIT(4))
-2613 #define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
+2750 #define IS_GEN6(dev)	(INTEL_INFO(dev)->gen_mask & BIT(5))
-2614 #define IS_GEN7(dev)	(INTEL_INFO(dev)->gen == 7)
+2751 #define IS_GEN7(dev)	(INTEL_INFO(dev)->gen_mask & BIT(6))
-2615 #define IS_GEN8(dev)	(INTEL_INFO(dev)->gen == 8)
+2752 #define IS_GEN8(dev)	(INTEL_INFO(dev)->gen_mask & BIT(7))
-2616 #define IS_GEN9(dev)	(INTEL_INFO(dev)->gen == 9)
+2753 #define IS_GEN9(dev)	(INTEL_INFO(dev)->gen_mask & BIT(8))
2617 2754
2618#define RENDER_RING (1<<RCS) 2755#define RENDER_RING (1<<RCS)
2619#define BSD_RING (1<<VCS) 2756#define BSD_RING (1<<VCS)
@@ -2686,12 +2823,18 @@ struct drm_i915_cmd_table {
2686 IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \ 2823 IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \
2687 IS_KABYLAKE(dev) || IS_BROXTON(dev)) 2824 IS_KABYLAKE(dev) || IS_BROXTON(dev))
2688#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6) 2825#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6)
-2689 #define HAS_RC6p(dev)	(INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
+2826 #define HAS_RC6p(dev)	(IS_GEN6(dev) || IS_IVYBRIDGE(dev))
2690 2827
2691#define HAS_CSR(dev) (IS_GEN9(dev)) 2828#define HAS_CSR(dev) (IS_GEN9(dev))
2692 2829
-2693 #define HAS_GUC_UCODE(dev)	(IS_GEN9(dev) && !IS_KABYLAKE(dev))
-2694 #define HAS_GUC_SCHED(dev)	(IS_GEN9(dev) && !IS_KABYLAKE(dev))
+2830 /*
+2831  * For now, anything with a GuC requires uCode loading, and then supports
+2832  * command submission once loaded. But these are logically independent
+2833  * properties, so we have separate macros to test them.
+2834  */
+2835 #define HAS_GUC(dev)		(IS_GEN9(dev) && !IS_KABYLAKE(dev))
+2836 #define HAS_GUC_UCODE(dev)	(HAS_GUC(dev))
+2837 #define HAS_GUC_SCHED(dev)	(HAS_GUC(dev))
2695 2838
2696#define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \ 2839#define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \
2697 INTEL_INFO(dev)->gen >= 8) 2840 INTEL_INFO(dev)->gen >= 8)
@@ -2700,6 +2843,8 @@ struct drm_i915_cmd_table {
2700 !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && \ 2843 !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && \
2701 !IS_BROXTON(dev)) 2844 !IS_BROXTON(dev))
2702 2845
2846#define HAS_POOLED_EU(dev) (INTEL_INFO(dev)->has_pooled_eu)
2847
2703#define INTEL_PCH_DEVICE_ID_MASK 0xff00 2848#define INTEL_PCH_DEVICE_ID_MASK 0xff00
2704#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 2849#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
2705#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 2850#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
@@ -2740,6 +2885,9 @@ extern int i915_max_ioctl;
2740extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state); 2885extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
2741extern int i915_resume_switcheroo(struct drm_device *dev); 2886extern int i915_resume_switcheroo(struct drm_device *dev);
2742 2887
2888int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
2889 int enable_ppgtt);
2890
2743/* i915_dma.c */ 2891/* i915_dma.c */
2744void __printf(3, 4) 2892void __printf(3, 4)
2745__i915_printk(struct drm_i915_private *dev_priv, const char *level, 2893__i915_printk(struct drm_i915_private *dev_priv, const char *level,
@@ -2760,9 +2908,9 @@ extern void i915_driver_postclose(struct drm_device *dev,
2760extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 2908extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
2761 unsigned long arg); 2909 unsigned long arg);
2762#endif 2910#endif
-2763 extern int intel_gpu_reset(struct drm_device *dev, u32 engine_mask);
+2911 extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
-2764 extern bool intel_has_gpu_reset(struct drm_device *dev);
+2912 extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
-2765 extern int i915_reset(struct drm_device *dev);
+2913 extern int i915_reset(struct drm_i915_private *dev_priv);
2766extern int intel_guc_reset(struct drm_i915_private *dev_priv); 2914extern int intel_guc_reset(struct drm_i915_private *dev_priv);
2767extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine); 2915extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
2768extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); 2916extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
@@ -2772,30 +2920,33 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
2772int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); 2920int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
2773 2921
2774/* intel_hotplug.c */ 2922/* intel_hotplug.c */
-2775 void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask);
+2923 void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
+2924 			   u32 pin_mask, u32 long_mask);
2776void intel_hpd_init(struct drm_i915_private *dev_priv); 2925void intel_hpd_init(struct drm_i915_private *dev_priv);
2777void intel_hpd_init_work(struct drm_i915_private *dev_priv); 2926void intel_hpd_init_work(struct drm_i915_private *dev_priv);
2778void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); 2927void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
2779bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port); 2928bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
2780 2929
2781/* i915_irq.c */ 2930/* i915_irq.c */
-2782 void i915_queue_hangcheck(struct drm_device *dev);
+2931 void i915_queue_hangcheck(struct drm_i915_private *dev_priv);
2783__printf(3, 4) 2932__printf(3, 4)
-2784 void i915_handle_error(struct drm_device *dev, u32 engine_mask,
+2933 void i915_handle_error(struct drm_i915_private *dev_priv,
+2934 		       u32 engine_mask,
2785 const char *fmt, ...); 2935 const char *fmt, ...);
2786 2936
2787extern void intel_irq_init(struct drm_i915_private *dev_priv); 2937extern void intel_irq_init(struct drm_i915_private *dev_priv);
2788int intel_irq_install(struct drm_i915_private *dev_priv); 2938int intel_irq_install(struct drm_i915_private *dev_priv);
2789void intel_irq_uninstall(struct drm_i915_private *dev_priv); 2939void intel_irq_uninstall(struct drm_i915_private *dev_priv);
2790 2940
2791extern void intel_uncore_sanitize(struct drm_device *dev); 2941extern void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
2792extern void intel_uncore_early_sanitize(struct drm_device *dev, 2942extern void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
2793 bool restore_forcewake); 2943 bool restore_forcewake);
2794extern void intel_uncore_init(struct drm_device *dev); 2944extern void intel_uncore_init(struct drm_i915_private *dev_priv);
2795extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv); 2945extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
2796extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv); 2946extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
2797extern void intel_uncore_fini(struct drm_device *dev); 2947extern void intel_uncore_fini(struct drm_i915_private *dev_priv);
2798extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore); 2948extern void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
2949 bool restore);
2799const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id); 2950const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
2800void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, 2951void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
2801 enum forcewake_domains domains); 2952 enum forcewake_domains domains);
@@ -2811,9 +2962,15 @@ void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
2811u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv); 2962u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
2812 2963
2813void assert_forcewakes_inactive(struct drm_i915_private *dev_priv); 2964void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
2814static inline bool intel_vgpu_active(struct drm_device *dev) 2965
2966static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
2815{ 2967{
2816 return to_i915(dev)->vgpu.active; 2968 return dev_priv->gvt.initialized;
2969}
2970
2971static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
2972{
2973 return dev_priv->vgpu.active;
2817} 2974}
2818 2975
2819void 2976void
@@ -2909,7 +3066,7 @@ int i915_gem_set_tiling(struct drm_device *dev, void *data,
2909 struct drm_file *file_priv); 3066 struct drm_file *file_priv);
2910int i915_gem_get_tiling(struct drm_device *dev, void *data, 3067int i915_gem_get_tiling(struct drm_device *dev, void *data,
2911 struct drm_file *file_priv); 3068 struct drm_file *file_priv);
2912int i915_gem_init_userptr(struct drm_device *dev); 3069void i915_gem_init_userptr(struct drm_i915_private *dev_priv);
2913int i915_gem_userptr_ioctl(struct drm_device *dev, void *data, 3070int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
2914 struct drm_file *file); 3071 struct drm_file *file);
2915int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 3072int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
@@ -2919,11 +3076,13 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
2919void i915_gem_load_init(struct drm_device *dev); 3076void i915_gem_load_init(struct drm_device *dev);
2920void i915_gem_load_cleanup(struct drm_device *dev); 3077void i915_gem_load_cleanup(struct drm_device *dev);
2921void i915_gem_load_init_fences(struct drm_i915_private *dev_priv); 3078void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
3079int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
3080
2922void *i915_gem_object_alloc(struct drm_device *dev); 3081void *i915_gem_object_alloc(struct drm_device *dev);
2923void i915_gem_object_free(struct drm_i915_gem_object *obj); 3082void i915_gem_object_free(struct drm_i915_gem_object *obj);
2924void i915_gem_object_init(struct drm_i915_gem_object *obj, 3083void i915_gem_object_init(struct drm_i915_gem_object *obj,
2925 const struct drm_i915_gem_object_ops *ops); 3084 const struct drm_i915_gem_object_ops *ops);
2926struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, 3085struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
2927 size_t size); 3086 size_t size);
2928struct drm_i915_gem_object *i915_gem_object_create_from_data( 3087struct drm_i915_gem_object *i915_gem_object_create_from_data(
2929 struct drm_device *dev, const void *data, size_t size); 3088 struct drm_device *dev, const void *data, size_t size);
@@ -2978,6 +3137,23 @@ static inline int __sg_page_count(struct scatterlist *sg)
2978struct page * 3137struct page *
2979i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n); 3138i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n);
2980 3139
3140static inline dma_addr_t
3141i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, int n)
3142{
3143 if (n < obj->get_page.last) {
3144 obj->get_page.sg = obj->pages->sgl;
3145 obj->get_page.last = 0;
3146 }
3147
3148 while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) {
3149 obj->get_page.last += __sg_page_count(obj->get_page.sg++);
3150 if (unlikely(sg_is_chain(obj->get_page.sg)))
3151 obj->get_page.sg = sg_chain_ptr(obj->get_page.sg);
3152 }
3153
3154 return sg_dma_address(obj->get_page.sg) + ((n - obj->get_page.last) << PAGE_SHIFT);
3155}
3156
2981static inline struct page * 3157static inline struct page *
2982i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n) 3158i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
2983{ 3159{
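
The i915_gem_object_get_dma_address() helper added above reuses the obj->get_page cursor that i915_gem_object_get_page() maintains: the cursor rewinds to the start of the scatterlist only when a caller asks for a page below the last position, so a monotonically increasing sequence of lookups traverses the list once overall instead of restarting at sgl for every call. A standalone sketch of the same cursor idea over an array of per-chunk page counts (illustrative only; names and data are invented, this is not kernel code):

	#include <stdio.h>

	/* Analogue of the obj->get_page cursor: each chunk covers a run of
	 * pages and the cursor remembers where the previous lookup ended,
	 * so sequential queries do not rescan from the start. */
	struct cursor { int chunk; int first_page; };

	static const int chunk_pages[] = { 4, 2, 8, 1 };   /* pages per chunk */

	static int lookup(struct cursor *c, int n)
	{
		if (n < c->first_page) {        /* going backwards: rewind */
			c->chunk = 0;
			c->first_page = 0;
		}
		while (c->first_page + chunk_pages[c->chunk] <= n)
			c->first_page += chunk_pages[c->chunk++];
		return c->chunk;                /* chunk containing page n */
	}

	int main(void)
	{
		struct cursor c = { 0, 0 };
		int n;

		for (n = 0; n < 15; n++)        /* sequential walk is O(chunks) total */
			printf("page %2d -> chunk %d\n", n, lookup(&c, n));
		return 0;
	}
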
@@ -3054,6 +3230,11 @@ int i915_gem_dumb_create(struct drm_file *file_priv,
 			  struct drm_mode_create_dumb *args);
 int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
 		      uint32_t handle, uint64_t *offset);
+
+void i915_gem_track_fb(struct drm_i915_gem_object *old,
+		       struct drm_i915_gem_object *new,
+		       unsigned frontbuffer_bits);
+
 /**
  * Returns true if seq1 is later than seq2.
  */
@@ -3081,13 +3262,13 @@ static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
 				 req->seqno);
 }
 
-int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
+int __must_check i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno);
 int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
 
 struct drm_i915_gem_request *
 i915_gem_find_active_request(struct intel_engine_cs *engine);
 
-bool i915_gem_retire_requests(struct drm_device *dev);
+bool i915_gem_retire_requests(struct drm_i915_private *dev_priv);
 void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
 
 static inline u32 i915_reset_counter(struct i915_gpu_error *error)
@@ -3147,7 +3328,6 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
 int __must_check i915_gem_init(struct drm_device *dev);
 int i915_gem_init_engines(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
-int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
 void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_cleanup_engines(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
@@ -3215,8 +3395,6 @@ bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
 			struct i915_address_space *vm);
 
-unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
-				struct i915_address_space *vm);
 struct i915_vma *
 i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
 		    struct i915_address_space *vm);
@@ -3251,14 +3429,8 @@ static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
 	return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal);
 }
 
-static inline unsigned long
-i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
-{
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-
-	return i915_gem_obj_size(obj, &ggtt->base);
-}
+unsigned long
+i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj);
 
 static inline int __must_check
 i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
@@ -3272,12 +3444,6 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
 				   alignment, flags | PIN_GLOBAL);
 }
 
-static inline int
-i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
-{
-	return i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
-}
-
 void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
 				     const struct i915_ggtt_view *view);
 static inline void
@@ -3301,28 +3467,44 @@ void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
 
 /* i915_gem_context.c */
 int __must_check i915_gem_context_init(struct drm_device *dev);
+void i915_gem_context_lost(struct drm_i915_private *dev_priv);
 void i915_gem_context_fini(struct drm_device *dev);
 void i915_gem_context_reset(struct drm_device *dev);
 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
-int i915_gem_context_enable(struct drm_i915_gem_request *req);
 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
 int i915_switch_context(struct drm_i915_gem_request *req);
-struct intel_context *
-i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
 void i915_gem_context_free(struct kref *ctx_ref);
 struct drm_i915_gem_object *
 i915_gem_alloc_context_obj(struct drm_device *dev, size_t size);
-static inline void i915_gem_context_reference(struct intel_context *ctx)
+struct i915_gem_context *
+i915_gem_context_create_gvt(struct drm_device *dev);
+
+static inline struct i915_gem_context *
+i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
+{
+	struct i915_gem_context *ctx;
+
+	lockdep_assert_held(&file_priv->dev_priv->dev->struct_mutex);
+
+	ctx = idr_find(&file_priv->context_idr, id);
+	if (!ctx)
+		return ERR_PTR(-ENOENT);
+
+	return ctx;
+}
+
+static inline void i915_gem_context_reference(struct i915_gem_context *ctx)
 {
 	kref_get(&ctx->ref);
 }
 
-static inline void i915_gem_context_unreference(struct intel_context *ctx)
+static inline void i915_gem_context_unreference(struct i915_gem_context *ctx)
 {
+	lockdep_assert_held(&ctx->i915->dev->struct_mutex);
 	kref_put(&ctx->ref, i915_gem_context_free);
 }
 
-static inline bool i915_gem_context_is_default(const struct intel_context *c)
+static inline bool i915_gem_context_is_default(const struct i915_gem_context *c)
 {
 	return c->user_handle == DEFAULT_CONTEXT_HANDLE;
 }
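
Note the error-convention change folded into the hunk above: the old i915_gem_context_get() returned NULL for an unknown handle, while the new inline i915_gem_context_lookup() returns ERR_PTR(-ENOENT) and documents its locking with lockdep_assert_held(). A hedged sketch of the caller shape this implies (the surrounding function is invented for illustration):

	/* Sketch only: assumes struct_mutex is already held, as the
	 * lockdep assertion in i915_gem_context_lookup() requires. */
	static int example_use_context(struct drm_i915_file_private *file_priv,
				       u32 id)
	{
		struct i915_gem_context *ctx;

		ctx = i915_gem_context_lookup(file_priv, id);
		if (IS_ERR(ctx))
			return PTR_ERR(ctx);     /* -ENOENT for a bad handle */

		i915_gem_context_reference(ctx); /* hold it across the work */
		/* ... operate on ctx ... */
		i915_gem_context_unreference(ctx);
		return 0;
	}
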
@@ -3335,6 +3517,8 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
 				    struct drm_file *file_priv);
 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 				    struct drm_file *file_priv);
+int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
+				       struct drm_file *file);
 
 /* i915_gem_evict.c */
 int __must_check i915_gem_evict_something(struct drm_device *dev,
@@ -3349,9 +3533,9 @@ int __must_check i915_gem_evict_for_vma(struct i915_vma *target);
 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 
 /* belongs in i915_gem_gtt.h */
-static inline void i915_gem_chipset_flush(struct drm_device *dev)
+static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
 {
-	if (INTEL_INFO(dev)->gen < 6)
+	if (INTEL_GEN(dev_priv) < 6)
 		intel_gtt_chipset_flush();
 }
 
@@ -3430,18 +3614,19 @@ static inline void i915_error_state_buf_release(
 {
 	kfree(eb->buf);
 }
-void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
+void i915_capture_error_state(struct drm_i915_private *dev_priv,
+			      u32 engine_mask,
 			      const char *error_msg);
 void i915_error_state_get(struct drm_device *dev,
 			  struct i915_error_state_file_priv *error_priv);
 void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
 void i915_destroy_error_state(struct drm_device *dev);
 
-void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
+void i915_get_extra_instdone(struct drm_i915_private *dev_priv, uint32_t *instdone);
 const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
 
 /* i915_cmd_parser.c */
-int i915_cmd_parser_get_version(void);
+int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
 int i915_cmd_parser_init_ring(struct intel_engine_cs *engine);
 void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine);
 bool i915_needs_cmd_parser(struct intel_engine_cs *engine);
@@ -3481,6 +3666,7 @@ int intel_bios_init(struct drm_i915_private *dev_priv);
 bool intel_bios_is_valid_vbt(const void *buf, size_t size);
 bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
 bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
+bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
 bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
 bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
 bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
@@ -3489,31 +3675,33 @@ bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
 
 /* intel_opregion.c */
 #ifdef CONFIG_ACPI
-extern int intel_opregion_setup(struct drm_device *dev);
-extern void intel_opregion_init(struct drm_device *dev);
-extern void intel_opregion_fini(struct drm_device *dev);
-extern void intel_opregion_asle_intr(struct drm_device *dev);
+extern int intel_opregion_setup(struct drm_i915_private *dev_priv);
+extern void intel_opregion_register(struct drm_i915_private *dev_priv);
+extern void intel_opregion_unregister(struct drm_i915_private *dev_priv);
+extern void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
 extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
					 bool enable);
-extern int intel_opregion_notify_adapter(struct drm_device *dev,
+extern int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
					  pci_power_t state);
-extern int intel_opregion_get_panel_type(struct drm_device *dev);
+extern int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
 #else
-static inline int intel_opregion_setup(struct drm_device *dev) { return 0; }
-static inline void intel_opregion_init(struct drm_device *dev) { return; }
-static inline void intel_opregion_fini(struct drm_device *dev) { return; }
-static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
+static inline int intel_opregion_setup(struct drm_i915_private *dev) { return 0; }
+static inline void intel_opregion_init(struct drm_i915_private *dev) { }
+static inline void intel_opregion_fini(struct drm_i915_private *dev) { }
+static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
+{
+}
 static inline int
 intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
 {
 	return 0;
 }
 static inline int
-intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
+intel_opregion_notify_adapter(struct drm_i915_private *dev, pci_power_t state)
 {
 	return 0;
 }
-static inline int intel_opregion_get_panel_type(struct drm_device *dev)
+static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev)
 {
 	return -ENODEV;
 }
@@ -3533,31 +3721,30 @@ extern void intel_modeset_init_hw(struct drm_device *dev);
 extern void intel_modeset_init(struct drm_device *dev);
 extern void intel_modeset_gem_init(struct drm_device *dev);
 extern void intel_modeset_cleanup(struct drm_device *dev);
-extern void intel_connector_unregister(struct intel_connector *);
+extern void intel_connector_unregister(struct drm_connector *);
 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
 extern void intel_display_resume(struct drm_device *dev);
 extern void i915_redisable_vga(struct drm_device *dev);
 extern void i915_redisable_vga_power_on(struct drm_device *dev);
-extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
+extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
 extern void intel_init_pch_refclk(struct drm_device *dev);
-extern void intel_set_rps(struct drm_device *dev, u8 val);
+extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
 extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
 				  bool enable);
 extern void intel_detect_pch(struct drm_device *dev);
-extern int intel_enable_rc6(const struct drm_device *dev);
 
-extern bool i915_semaphore_is_enabled(struct drm_device *dev);
+extern bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv);
 int i915_reg_read_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file);
-int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
-			       struct drm_file *file);
 
 /* overlay */
-extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
+extern struct intel_overlay_error_state *
+intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
 extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
 					    struct intel_overlay_error_state *error);
 
-extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
+extern struct intel_display_error_state *
+intel_display_capture_error_state(struct drm_i915_private *dev_priv);
 extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
 					    struct drm_device *dev,
 					    struct intel_display_error_state *error);
@@ -3586,6 +3773,24 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
 u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
 void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
 
+/* intel_dpio_phy.c */
+void chv_set_phy_signal_level(struct intel_encoder *encoder,
+			      u32 deemph_reg_value, u32 margin_reg_value,
+			      bool uniq_trans_scale);
+void chv_data_lane_soft_reset(struct intel_encoder *encoder,
+			      bool reset);
+void chv_phy_pre_pll_enable(struct intel_encoder *encoder);
+void chv_phy_pre_encoder_enable(struct intel_encoder *encoder);
+void chv_phy_release_cl2_override(struct intel_encoder *encoder);
+void chv_phy_post_pll_disable(struct intel_encoder *encoder);
+
+void vlv_set_phy_signal_level(struct intel_encoder *encoder,
+			      u32 demph_reg_value, u32 preemph_reg_value,
+			      u32 uniqtranscale_reg_value, u32 tx3_demph);
+void vlv_phy_pre_pll_enable(struct intel_encoder *encoder);
+void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder);
+void vlv_phy_reset_lanes(struct intel_encoder *encoder);
+
 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
 int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index aad26851cee3..21d0dea57312 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -54,12 +54,33 @@ static bool cpu_cache_is_coherent(struct drm_device *dev,
 
 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
 {
+	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
+		return false;
+
 	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
 		return true;
 
 	return obj->pin_display;
 }
 
+static int
+insert_mappable_node(struct drm_i915_private *i915,
+		     struct drm_mm_node *node, u32 size)
+{
+	memset(node, 0, sizeof(*node));
+	return drm_mm_insert_node_in_range_generic(&i915->ggtt.base.mm, node,
+						   size, 0, 0, 0,
+						   i915->ggtt.mappable_end,
+						   DRM_MM_SEARCH_DEFAULT,
+						   DRM_MM_CREATE_DEFAULT);
+}
+
+static void
+remove_mappable_node(struct drm_mm_node *node)
+{
+	drm_mm_remove_node(node);
+}
+
 /* some bookkeeping */
 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
 				  size_t size)
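
insert_mappable_node() is a thin wrapper around drm_mm_insert_node_in_range_generic() that confines the reservation to the CPU-mappable part of the global GTT (up to i915->ggtt.mappable_end). The pread/pwrite paths added below use it to borrow a single page of aperture when the whole object cannot be pinned with PIN_MAPPABLE; a compressed sketch of that borrow-and-return pattern (error handling trimmed, so illustrative rather than complete):

	struct drm_mm_node node;

	if (i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE)) {
		/* object does not fit: reserve one scratch page instead */
		insert_mappable_node(i915, &node, PAGE_SIZE);
		i915_gem_object_get_pages(obj);
		i915_gem_object_pin_pages(obj);
	} else {
		node.start = i915_gem_obj_ggtt_offset(obj); /* already mapped */
		node.allocated = false;
	}

	/* ... copy loop: when node.allocated, rebind the current page's DMA
	 * address at node.start before each per-page copy ... */

	if (node.allocated) {
		ggtt->base.clear_range(&ggtt->base, node.start, node.size, true);
		i915_gem_object_unpin_pages(obj);
		remove_mappable_node(&node);
	} else {
		i915_gem_object_ggtt_unpin(obj);
	}
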
@@ -177,7 +198,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 		vaddr += PAGE_SIZE;
 	}
 
-	i915_gem_chipset_flush(obj->base.dev);
+	i915_gem_chipset_flush(to_i915(obj->base.dev));
 
 	st = kmalloc(sizeof(*st), GFP_KERNEL);
 	if (st == NULL)
@@ -347,7 +368,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
 	}
 
 	drm_clflush_virt_range(vaddr, args->size);
-	i915_gem_chipset_flush(dev);
+	i915_gem_chipset_flush(to_i915(dev));
 
 out:
 	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
@@ -381,9 +402,9 @@ i915_gem_create(struct drm_file *file,
 		return -EINVAL;
 
 	/* Allocate the new object */
-	obj = i915_gem_alloc_object(dev, size);
-	if (obj == NULL)
-		return -ENOMEM;
+	obj = i915_gem_object_create(dev, size);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
 
 	ret = drm_gem_handle_create(file, &obj->base, &handle);
 	/* drop reference from allocate - handle holds it now */
@@ -409,6 +430,9 @@ i915_gem_dumb_create(struct drm_file *file,
 
 /**
  * Creates a new mm object and returns a handle to it.
+ * @dev: drm device pointer
+ * @data: ioctl data blob
+ * @file: drm file pointer
  */
 int
 i915_gem_create_ioctl(struct drm_device *dev, void *data,
@@ -585,6 +609,142 @@ shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
 	return ret ? - EFAULT : 0;
 }
 
+static inline unsigned long
+slow_user_access(struct io_mapping *mapping,
+		 uint64_t page_base, int page_offset,
+		 char __user *user_data,
+		 unsigned long length, bool pwrite)
+{
+	void __iomem *ioaddr;
+	void *vaddr;
+	uint64_t unwritten;
+
+	ioaddr = io_mapping_map_wc(mapping, page_base, PAGE_SIZE);
+	/* We can use the cpu mem copy function because this is X86. */
+	vaddr = (void __force *)ioaddr + page_offset;
+	if (pwrite)
+		unwritten = __copy_from_user(vaddr, user_data, length);
+	else
+		unwritten = __copy_to_user(user_data, vaddr, length);
+
+	io_mapping_unmap(ioaddr);
+	return unwritten;
+}
+
+static int
+i915_gem_gtt_pread(struct drm_device *dev,
+		   struct drm_i915_gem_object *obj, uint64_t size,
+		   uint64_t data_offset, uint64_t data_ptr)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct drm_mm_node node;
+	char __user *user_data;
+	uint64_t remain;
+	uint64_t offset;
+	int ret;
+
+	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
+	if (ret) {
+		ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
+		if (ret)
+			goto out;
+
+		ret = i915_gem_object_get_pages(obj);
+		if (ret) {
+			remove_mappable_node(&node);
+			goto out;
+		}
+
+		i915_gem_object_pin_pages(obj);
+	} else {
+		node.start = i915_gem_obj_ggtt_offset(obj);
+		node.allocated = false;
+		ret = i915_gem_object_put_fence(obj);
+		if (ret)
+			goto out_unpin;
+	}
+
+	ret = i915_gem_object_set_to_gtt_domain(obj, false);
+	if (ret)
+		goto out_unpin;
+
+	user_data = u64_to_user_ptr(data_ptr);
+	remain = size;
+	offset = data_offset;
+
+	mutex_unlock(&dev->struct_mutex);
+	if (likely(!i915.prefault_disable)) {
+		ret = fault_in_multipages_writeable(user_data, remain);
+		if (ret) {
+			mutex_lock(&dev->struct_mutex);
+			goto out_unpin;
+		}
+	}
+
+	while (remain > 0) {
+		/* Operation in this page
+		 *
+		 * page_base = page offset within aperture
+		 * page_offset = offset within page
+		 * page_length = bytes to copy for this page
+		 */
+		u32 page_base = node.start;
+		unsigned page_offset = offset_in_page(offset);
+		unsigned page_length = PAGE_SIZE - page_offset;
+		page_length = remain < page_length ? remain : page_length;
+		if (node.allocated) {
+			wmb();
+			ggtt->base.insert_page(&ggtt->base,
+					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
+					       node.start,
+					       I915_CACHE_NONE, 0);
+			wmb();
+		} else {
+			page_base += offset & PAGE_MASK;
+		}
+		/* This is a slow read/write as it tries to read from
+		 * and write to user memory which may result into page
+		 * faults, and so we cannot perform this under struct_mutex.
+		 */
+		if (slow_user_access(ggtt->mappable, page_base,
+				     page_offset, user_data,
+				     page_length, false)) {
+			ret = -EFAULT;
+			break;
+		}
+
+		remain -= page_length;
+		user_data += page_length;
+		offset += page_length;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	if (ret == 0 && (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
+		/* The user has modified the object whilst we tried
+		 * reading from it, and we now have no idea what domain
+		 * the pages should be in. As we have just been touching
+		 * them directly, flush everything back to the GTT
+		 * domain.
+		 */
+		ret = i915_gem_object_set_to_gtt_domain(obj, false);
+	}
+
+out_unpin:
+	if (node.allocated) {
+		wmb();
+		ggtt->base.clear_range(&ggtt->base,
+				       node.start, node.size,
+				       true);
+		i915_gem_object_unpin_pages(obj);
+		remove_mappable_node(&node);
+	} else {
+		i915_gem_object_ggtt_unpin(obj);
+	}
+out:
+	return ret;
+}
+
 static int
 i915_gem_shmem_pread(struct drm_device *dev,
 		     struct drm_i915_gem_object *obj,
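
The copy loop in i915_gem_gtt_pread() advances in at most page-sized spans: page_offset is the starting offset inside the current page, page_length is clamped first to the page boundary and then to the bytes remaining, and when the scratch node is in use the span's backing page is rebound at node.start (bracketed by wmb()) before each copy. The span arithmetic in isolation, as a runnable sketch with invented example numbers:

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE  (1u << PAGE_SHIFT)

	/* Split an (offset, size) request into the per-page spans that the
	 * pread/pwrite loops hand to slow_user_access()/fast_user_write(). */
	int main(void)
	{
		uint64_t offset = 5000, remain = 10000;    /* example request */

		while (remain > 0) {
			unsigned page_offset = offset & (PAGE_SIZE - 1);
			unsigned page_length = PAGE_SIZE - page_offset;

			if (remain < page_length)
				page_length = remain;
			printf("page %llu: %u bytes at intra-page offset %u\n",
			       (unsigned long long)(offset >> PAGE_SHIFT),
			       page_length, page_offset);
			remain -= page_length;
			offset += page_length;
		}
		return 0;
	}
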
@@ -600,6 +760,9 @@ i915_gem_shmem_pread(struct drm_device *dev,
 	int needs_clflush = 0;
 	struct sg_page_iter sg_iter;
 
+	if (!obj->base.filp)
+		return -ENODEV;
+
 	user_data = u64_to_user_ptr(args->data_ptr);
 	remain = args->size;
 
@@ -672,6 +835,9 @@ out:
 
 /**
  * Reads data from the object referenced by handle.
+ * @dev: drm device pointer
+ * @data: ioctl data blob
+ * @file: drm file pointer
  *
  * On error, the contents of *data are undefined.
  */
@@ -708,18 +874,15 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 		goto out;
 	}
 
-	/* prime objects have no backing filp to GEM pread/pwrite
-	 * pages from.
-	 */
-	if (!obj->base.filp) {
-		ret = -EINVAL;
-		goto out;
-	}
-
 	trace_i915_gem_object_pread(obj, args->offset, args->size);
 
 	ret = i915_gem_shmem_pread(dev, obj, args, file);
 
+	/* pread for non shmem backed objects */
+	if (ret == -EFAULT || ret == -ENODEV)
+		ret = i915_gem_gtt_pread(dev, obj, args->size,
+					 args->offset, args->data_ptr);
+
 out:
 	drm_gem_object_unreference(&obj->base);
 unlock:
@@ -753,60 +916,99 @@ fast_user_write(struct io_mapping *mapping,
 /**
  * This is the fast pwrite path, where we copy the data directly from the
  * user into the GTT, uncached.
+ * @dev: drm device pointer
+ * @obj: i915 gem object
+ * @args: pwrite arguments structure
+ * @file: drm file pointer
  */
 static int
-i915_gem_gtt_pwrite_fast(struct drm_device *dev,
+i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
 			 struct drm_i915_gem_object *obj,
 			 struct drm_i915_gem_pwrite *args,
 			 struct drm_file *file)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-	ssize_t remain;
-	loff_t offset, page_base;
+	struct i915_ggtt *ggtt = &i915->ggtt;
+	struct drm_device *dev = obj->base.dev;
+	struct drm_mm_node node;
+	uint64_t remain, offset;
 	char __user *user_data;
-	int page_offset, page_length, ret;
+	int ret;
+	bool hit_slow_path = false;
+
+	if (obj->tiling_mode != I915_TILING_NONE)
+		return -EFAULT;
 
 	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
-	if (ret)
-		goto out;
+	if (ret) {
+		ret = insert_mappable_node(i915, &node, PAGE_SIZE);
+		if (ret)
+			goto out;
+
+		ret = i915_gem_object_get_pages(obj);
+		if (ret) {
+			remove_mappable_node(&node);
+			goto out;
+		}
+
+		i915_gem_object_pin_pages(obj);
+	} else {
+		node.start = i915_gem_obj_ggtt_offset(obj);
+		node.allocated = false;
+		ret = i915_gem_object_put_fence(obj);
+		if (ret)
+			goto out_unpin;
+	}
 
 	ret = i915_gem_object_set_to_gtt_domain(obj, true);
 	if (ret)
 		goto out_unpin;
 
-	ret = i915_gem_object_put_fence(obj);
-	if (ret)
-		goto out_unpin;
+	intel_fb_obj_invalidate(obj, ORIGIN_GTT);
+	obj->dirty = true;
 
 	user_data = u64_to_user_ptr(args->data_ptr);
+	offset = args->offset;
 	remain = args->size;
-
-	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
-
-	intel_fb_obj_invalidate(obj, ORIGIN_GTT);
-
-	while (remain > 0) {
+	while (remain) {
 		/* Operation in this page
 		 *
 		 * page_base = page offset within aperture
 		 * page_offset = offset within page
 		 * page_length = bytes to copy for this page
 		 */
-		page_base = offset & PAGE_MASK;
-		page_offset = offset_in_page(offset);
-		page_length = remain;
-		if ((page_offset + remain) > PAGE_SIZE)
-			page_length = PAGE_SIZE - page_offset;
-
+		u32 page_base = node.start;
+		unsigned page_offset = offset_in_page(offset);
+		unsigned page_length = PAGE_SIZE - page_offset;
+		page_length = remain < page_length ? remain : page_length;
+		if (node.allocated) {
+			wmb(); /* flush the write before we modify the GGTT */
+			ggtt->base.insert_page(&ggtt->base,
+					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
+					       node.start, I915_CACHE_NONE, 0);
+			wmb(); /* flush modifications to the GGTT (insert_page) */
+		} else {
+			page_base += offset & PAGE_MASK;
+		}
 		/* If we get a fault while copying data, then (presumably) our
 		 * source page isn't available. Return the error and we'll
 		 * retry in the slow path.
+		 * If the object is non-shmem backed, we retry again with the
+		 * path that handles page fault.
 		 */
 		if (fast_user_write(ggtt->mappable, page_base,
 				    page_offset, user_data, page_length)) {
-			ret = -EFAULT;
-			goto out_flush;
+			hit_slow_path = true;
+			mutex_unlock(&dev->struct_mutex);
+			if (slow_user_access(ggtt->mappable,
+					     page_base,
+					     page_offset, user_data,
+					     page_length, true)) {
+				ret = -EFAULT;
+				mutex_lock(&dev->struct_mutex);
+				goto out_flush;
+			}
+
+			mutex_lock(&dev->struct_mutex);
 		}
 
 		remain -= page_length;
@@ -815,9 +1017,31 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
 	}
 
 out_flush:
+	if (hit_slow_path) {
+		if (ret == 0 &&
+		    (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
+			/* The user has modified the object whilst we tried
+			 * reading from it, and we now have no idea what domain
+			 * the pages should be in. As we have just been touching
+			 * them directly, flush everything back to the GTT
+			 * domain.
+			 */
+			ret = i915_gem_object_set_to_gtt_domain(obj, false);
+		}
+	}
+
 	intel_fb_obj_flush(obj, false, ORIGIN_GTT);
 out_unpin:
-	i915_gem_object_ggtt_unpin(obj);
+	if (node.allocated) {
+		wmb();
+		ggtt->base.clear_range(&ggtt->base,
+				       node.start, node.size,
+				       true);
+		i915_gem_object_unpin_pages(obj);
+		remove_mappable_node(&node);
+	} else {
+		i915_gem_object_ggtt_unpin(obj);
+	}
 out:
 	return ret;
 }
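
The hit_slow_path handling above exists because copying user memory can fault, and servicing that fault may itself require struct_mutex (the source of a pwrite can be a GTT mmap of another GEM object), so faulting copies must never run under the lock. The idiom, reduced to its skeleton (names as in the patch; this is a sketch, not the complete loop):

	if (fast_user_write(ggtt->mappable, page_base,
			    page_offset, user_data, page_length)) {
		/* user page not resident: retry where faulting is legal */
		hit_slow_path = true;
		mutex_unlock(&dev->struct_mutex);
		if (slow_user_access(ggtt->mappable, page_base, page_offset,
				     user_data, page_length, true)) {
			mutex_lock(&dev->struct_mutex);
			ret = -EFAULT;          /* still unreadable: give up */
			goto out_flush;
		}
		mutex_lock(&dev->struct_mutex);
		/* object state may have changed while unlocked; the GTT
		 * domain is revalidated under out_flush */
	}
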
@@ -1006,7 +1230,7 @@ out:
 	}
 
 	if (needs_clflush_after)
-		i915_gem_chipset_flush(dev);
+		i915_gem_chipset_flush(to_i915(dev));
 	else
 		obj->cache_dirty = true;
 
@@ -1016,6 +1240,9 @@ out:
 
 /**
  * Writes data to the object referenced by handle.
+ * @dev: drm device
+ * @data: ioctl data blob
+ * @file: drm file
  *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
@@ -1062,14 +1289,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 		goto out;
 	}
 
-	/* prime objects have no backing filp to GEM pread/pwrite
-	 * pages from.
-	 */
-	if (!obj->base.filp) {
-		ret = -EINVAL;
-		goto out;
-	}
-
 	trace_i915_gem_object_pwrite(obj, args->offset, args->size);
 
 	ret = -EFAULT;
@@ -1079,20 +1298,20 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	 * pread/pwrite currently are reading and writing from the CPU
 	 * perspective, requiring manual detiling by the client.
 	 */
-	if (obj->tiling_mode == I915_TILING_NONE &&
-	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
-	    cpu_write_needs_clflush(obj)) {
-		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
+	if (!obj->base.filp || cpu_write_needs_clflush(obj)) {
+		ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
 		/* Note that the gtt paths might fail with non-page-backed user
 		 * pointers (e.g. gtt mappings when moving data between
 		 * textures). Fallback to the shmem path in that case. */
 	}
 
-	if (ret == -EFAULT || ret == -ENOSPC) {
+	if (ret == -EFAULT) {
 		if (obj->phys_handle)
 			ret = i915_gem_phys_pwrite(obj, args, file);
-		else
+		else if (obj->base.filp)
 			ret = i915_gem_shmem_pwrite(dev, obj, args, file);
+		else
+			ret = -ENODEV;
 	}
 
 out:
@@ -1213,6 +1432,7 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
 * @req: duh!
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
+ * @rps: RPS client
 *
 * Note: It is of utmost importance that the passed in seqno and reset_counter
 * values have been read by the caller in an smp safe manner. Where read-side
@@ -1230,8 +1450,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 			struct intel_rps_client *rps)
 {
 	struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = req->i915;
 	const bool irq_test_in_progress =
 		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine);
 	int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
@@ -1413,6 +1632,13 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 	list_del_init(&request->list);
 	i915_gem_request_remove_from_client(request);
 
+	if (request->previous_context) {
+		if (i915.enable_execlists)
+			intel_lr_context_unpin(request->previous_context,
+					       request->engine);
+	}
+
+	i915_gem_context_unreference(request->ctx);
 	i915_gem_request_unreference(request);
 }
 
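
The retire-time block above pairs with the i915_gem_request_free() simplification later in this patch: teardown that needs struct_mutex (unpinning the execlists context, dropping the context reference) now happens when the request is retired, leaving the kref release with nothing to do but free memory. A hedged sketch of the resulting split (function names shortened for illustration):

	/* Retire runs under struct_mutex, so mutex-protected teardown
	 * belongs here ... */
	static void request_retire(struct drm_i915_gem_request *request)
	{
		if (request->previous_context && i915.enable_execlists)
			intel_lr_context_unpin(request->previous_context,
					       request->engine);
		i915_gem_context_unreference(request->ctx);
	}

	/* ... while the kref release may run from any context and only
	 * returns the request to its slab cache. */
	static void request_free(struct kref *ref)
	{
		struct drm_i915_gem_request *req =
			container_of(ref, typeof(*req), ref);

		kmem_cache_free(req->i915->requests, req);
	}
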
@@ -1422,7 +1648,7 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
 	struct intel_engine_cs *engine = req->engine;
 	struct drm_i915_gem_request *tmp;
 
-	lockdep_assert_held(&engine->dev->struct_mutex);
+	lockdep_assert_held(&engine->i915->dev->struct_mutex);
 
 	if (list_empty(&req->list))
 		return;
@@ -1440,6 +1666,7 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
 /**
 * Waits for a request to be signaled, and cleans up the
 * request and object lists appropriately for that event.
+ * @req: request to wait on
 */
 int
 i915_wait_request(struct drm_i915_gem_request *req)
@@ -1466,6 +1693,8 @@ i915_wait_request(struct drm_i915_gem_request *req)
 /**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
+ * @obj: i915 gem object
+ * @readonly: waiting for read access or write
 */
 int
 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
@@ -1583,6 +1812,9 @@ static struct intel_rps_client *to_rps_client(struct drm_file *file)
 /**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
+ * @dev: drm device
+ * @data: ioctl data blob
+ * @file: drm file
 */
 int
 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
@@ -1646,6 +1878,9 @@ unlock:
 
 /**
 * Called when user space has done writes to this buffer
+ * @dev: drm device
+ * @data: ioctl data blob
+ * @file: drm file
 */
 int
 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
@@ -1676,8 +1911,11 @@ unlock:
 }
 
 /**
- * Maps the contents of an object, returning the address it is mapped
- * into.
+ * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
+ *			 it is mapped to.
+ * @dev: drm device
+ * @data: ioctl data blob
+ * @file: drm file
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
@@ -1982,7 +2220,7 @@ i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
 		return size;
 
 	/* Previous chips need a power-of-two fence region when tiling */
-	if (INTEL_INFO(dev)->gen == 3)
+	if (IS_GEN3(dev))
 		gtt_size = 1024*1024;
 	else
 		gtt_size = 512*1024;
@@ -1995,7 +2233,10 @@
 
 /**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
- * @obj: object to check
+ * @dev: drm device
+ * @size: object size
+ * @tiling_mode: tiling mode
+ * @fenced: is fenced alignment required or not
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping.
@@ -2162,7 +2403,8 @@ i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
 static void
 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 {
-	struct sg_page_iter sg_iter;
+	struct sgt_iter sgt_iter;
+	struct page *page;
 	int ret;
 
 	BUG_ON(obj->madv == __I915_MADV_PURGED);
@@ -2184,9 +2426,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 	if (obj->madv == I915_MADV_DONTNEED)
 		obj->dirty = 0;
 
-	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-		struct page *page = sg_page_iter_page(&sg_iter);
-
+	for_each_sgt_page(page, sgt_iter, obj->pages) {
 		if (obj->dirty)
 			set_page_dirty(page);
 
@@ -2243,7 +2483,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	struct address_space *mapping;
 	struct sg_table *st;
 	struct scatterlist *sg;
-	struct sg_page_iter sg_iter;
+	struct sgt_iter sgt_iter;
 	struct page *page;
 	unsigned long last_pfn = 0;	/* suppress gcc warning */
 	int ret;
@@ -2340,8 +2580,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 
 err_pages:
 	sg_mark_end(sg);
-	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
-		put_page(sg_page_iter_page(&sg_iter));
+	for_each_sgt_page(page, sgt_iter, st)
+		put_page(page);
 	sg_free_table(st);
 	kfree(st);
 
@@ -2395,6 +2635,44 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 	return 0;
 }
 
+/* The 'mapping' part of i915_gem_object_pin_map() below */
+static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
+{
+	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
+	struct sg_table *sgt = obj->pages;
+	struct sgt_iter sgt_iter;
+	struct page *page;
+	struct page *stack_pages[32];
+	struct page **pages = stack_pages;
+	unsigned long i = 0;
+	void *addr;
+
+	/* A single page can always be kmapped */
+	if (n_pages == 1)
+		return kmap(sg_page(sgt->sgl));
+
+	if (n_pages > ARRAY_SIZE(stack_pages)) {
+		/* Too big for stack -- allocate temporary array instead */
+		pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
+		if (!pages)
+			return NULL;
+	}
+
+	for_each_sgt_page(page, sgt_iter, sgt)
+		pages[i++] = page;
+
+	/* Check that we have the expected number of pages */
+	GEM_BUG_ON(i != n_pages);
+
+	addr = vmap(pages, n_pages, 0, PAGE_KERNEL);
+
+	if (pages != stack_pages)
+		drm_free_large(pages);
+
+	return addr;
+}
+
+/* get, pin, and map the pages of the object into kernel space */
 void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
 {
 	int ret;
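
i915_gem_object_map() factors the old inline mapping logic out of i915_gem_object_pin_map() and adds one optimisation: for objects of up to 32 pages the temporary page array lives on the stack, avoiding a drm_malloc_gfp()/drm_free_large() round trip before vmap(). The pattern in isolation, as a runnable sketch (the summing workload is invented; it stands in for the gather-then-vmap step):

	#include <stdlib.h>

	static double sum_copied(const double *values, size_t n)
	{
		double stack_tmp[32];
		double *tmp = stack_tmp;
		double total = 0;
		size_t i;

		if (n > sizeof(stack_tmp) / sizeof(stack_tmp[0])) {
			/* too big for the stack: temporary heap array */
			tmp = malloc(n * sizeof(*tmp));
			if (!tmp)
				return 0;
		}

		for (i = 0; i < n; i++)         /* stands in for the page gather */
			tmp[i] = values[i];
		for (i = 0; i < n; i++)         /* stands in for vmap() consuming it */
			total += tmp[i];

		if (tmp != stack_tmp)
			free(tmp);
		return total;
	}
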
@@ -2407,29 +2685,9 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
2407 2685
2408 i915_gem_object_pin_pages(obj); 2686 i915_gem_object_pin_pages(obj);
2409 2687
2410 if (obj->mapping == NULL) { 2688 if (!obj->mapping) {
2411 struct page **pages; 2689 obj->mapping = i915_gem_object_map(obj);
2412 2690 if (!obj->mapping) {
2413 pages = NULL;
2414 if (obj->base.size == PAGE_SIZE)
2415 obj->mapping = kmap(sg_page(obj->pages->sgl));
2416 else
2417 pages = drm_malloc_gfp(obj->base.size >> PAGE_SHIFT,
2418 sizeof(*pages),
2419 GFP_TEMPORARY);
2420 if (pages != NULL) {
2421 struct sg_page_iter sg_iter;
2422 int n;
2423
2424 n = 0;
2425 for_each_sg_page(obj->pages->sgl, &sg_iter,
2426 obj->pages->nents, 0)
2427 pages[n++] = sg_page_iter_page(&sg_iter);
2428
2429 obj->mapping = vmap(pages, n, 0, PAGE_KERNEL);
2430 drm_free_large(pages);
2431 }
2432 if (obj->mapping == NULL) {
2433 i915_gem_object_unpin_pages(obj); 2691 i915_gem_object_unpin_pages(obj);
2434 return ERR_PTR(-ENOMEM); 2692 return ERR_PTR(-ENOMEM);
2435 } 2693 }
@@ -2502,9 +2760,8 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
2502} 2760}
2503 2761
2504static int 2762static int
2505i915_gem_init_seqno(struct drm_device *dev, u32 seqno) 2763i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
2506{ 2764{
2507 struct drm_i915_private *dev_priv = dev->dev_private;
2508 struct intel_engine_cs *engine; 2765 struct intel_engine_cs *engine;
2509 int ret; 2766 int ret;
2510 2767
@@ -2514,7 +2771,7 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
2514 if (ret) 2771 if (ret)
2515 return ret; 2772 return ret;
2516 } 2773 }
2517 i915_gem_retire_requests(dev); 2774 i915_gem_retire_requests(dev_priv);
2518 2775
2519 /* Finally reset hw state */ 2776 /* Finally reset hw state */
2520 for_each_engine(engine, dev_priv) 2777 for_each_engine(engine, dev_priv)
@@ -2534,7 +2791,7 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2534 /* HWS page needs to be set less than what we 2791 /* HWS page needs to be set less than what we
2535 * will inject to ring 2792 * will inject to ring
2536 */ 2793 */
2537 ret = i915_gem_init_seqno(dev, seqno - 1); 2794 ret = i915_gem_init_seqno(dev_priv, seqno - 1);
2538 if (ret) 2795 if (ret)
2539 return ret; 2796 return ret;
2540 2797
@@ -2550,13 +2807,11 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2550} 2807}
2551 2808
2552int 2809int
2553i915_gem_get_seqno(struct drm_device *dev, u32 *seqno) 2810i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
2554{ 2811{
2555 struct drm_i915_private *dev_priv = dev->dev_private;
2556
2557 /* reserve 0 for non-seqno */ 2812 /* reserve 0 for non-seqno */
2558 if (dev_priv->next_seqno == 0) { 2813 if (dev_priv->next_seqno == 0) {
2559 int ret = i915_gem_init_seqno(dev, 0); 2814 int ret = i915_gem_init_seqno(dev_priv, 0);
2560 if (ret) 2815 if (ret)
2561 return ret; 2816 return ret;
2562 2817
@@ -2580,6 +2835,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
2580 struct drm_i915_private *dev_priv; 2835 struct drm_i915_private *dev_priv;
2581 struct intel_ringbuffer *ringbuf; 2836 struct intel_ringbuffer *ringbuf;
2582 u32 request_start; 2837 u32 request_start;
2838 u32 reserved_tail;
2583 int ret; 2839 int ret;
2584 2840
2585 if (WARN_ON(request == NULL)) 2841 if (WARN_ON(request == NULL))
@@ -2594,9 +2850,10 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	 * should already have been reserved in the ring buffer. Let the ring
 	 * know that it is time to use that space up.
 	 */
-	intel_ring_reserved_space_use(ringbuf);
-
 	request_start = intel_ring_get_tail(ringbuf);
+	reserved_tail = request->reserved_space;
+	request->reserved_space = 0;
+
 	/*
 	 * Emit any outstanding flushes - execbuf can fail to emit the flush
 	 * after having emitted the batchbuffer command. Hence we need to fix
@@ -2652,19 +2909,25 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	/* Not allowed to fail! */
 	WARN(ret, "emit|add_request failed: %d!\n", ret);
 
-	i915_queue_hangcheck(engine->dev);
+	i915_queue_hangcheck(engine->i915);
 
 	queue_delayed_work(dev_priv->wq,
 			   &dev_priv->mm.retire_work,
 			   round_jiffies_up_relative(HZ));
-	intel_mark_busy(dev_priv->dev);
+	intel_mark_busy(dev_priv);
 
 	/* Sanity check that the reserved size was large enough. */
-	intel_ring_reserved_space_end(ringbuf);
+	ret = intel_ring_get_tail(ringbuf) - request_start;
+	if (ret < 0)
+		ret += ringbuf->size;
+	WARN_ONCE(ret > reserved_tail,
+		  "Not enough space reserved (%d bytes) "
+		  "for adding the request (%d bytes)\n",
+		  reserved_tail, ret);
 }
 
 static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
-				   const struct intel_context *ctx)
+				   const struct i915_gem_context *ctx)
 {
 	unsigned long elapsed;
 
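[Editor's note] The replacement sanity check computes how many bytes the request actually consumed, allowing for the tail wrapping past the end of the ring. The arithmetic in isolation:

#include <stdio.h>

/* Bytes consumed between request_start and the final tail of a
 * circular ring buffer of the given size. */
static int ring_space_used(int start, int tail, int size)
{
	int used = tail - start;

	if (used < 0)		/* the tail wrapped around */
		used += size;
	return used;
}

int main(void)
{
	/* 4096-byte ring, 256 bytes emitted starting 128 bytes before the end */
	printf("%d\n", ring_space_used(4096 - 128, 128, 4096));	/* 256 */
	return 0;
}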
@@ -2689,7 +2952,7 @@ static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
 }
 
 static void i915_set_reset_status(struct drm_i915_private *dev_priv,
-				  struct intel_context *ctx,
+				  struct i915_gem_context *ctx,
 				  const bool guilty)
 {
 	struct i915_ctx_hang_stats *hs;
@@ -2712,27 +2975,15 @@ void i915_gem_request_free(struct kref *req_ref)
 {
 	struct drm_i915_gem_request *req = container_of(req_ref,
 						 typeof(*req), ref);
-	struct intel_context *ctx = req->ctx;
-
-	if (req->file_priv)
-		i915_gem_request_remove_from_client(req);
-
-	if (ctx) {
-		if (i915.enable_execlists && ctx != req->i915->kernel_context)
-			intel_lr_context_unpin(ctx, req->engine);
-
-		i915_gem_context_unreference(ctx);
-	}
-
 	kmem_cache_free(req->i915->requests, req);
 }
 
 static inline int
 __i915_gem_request_alloc(struct intel_engine_cs *engine,
-			 struct intel_context *ctx,
+			 struct i915_gem_context *ctx,
 			 struct drm_i915_gem_request **req_out)
 {
-	struct drm_i915_private *dev_priv = to_i915(engine->dev);
+	struct drm_i915_private *dev_priv = engine->i915;
 	unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
 	struct drm_i915_gem_request *req;
 	int ret;
@@ -2754,7 +3005,7 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
 	if (req == NULL)
 		return -ENOMEM;
 
-	ret = i915_gem_get_seqno(engine->dev, &req->seqno);
+	ret = i915_gem_get_seqno(engine->i915, &req->seqno);
 	if (ret)
 		goto err;
 
@@ -2765,15 +3016,6 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
 	req->ctx = ctx;
 	i915_gem_context_reference(req->ctx);
 
-	if (i915.enable_execlists)
-		ret = intel_logical_ring_alloc_request_extras(req);
-	else
-		ret = intel_ring_alloc_request_extras(req);
-	if (ret) {
-		i915_gem_context_unreference(req->ctx);
-		goto err;
-	}
-
 	/*
 	 * Reserve space in the ring buffer for all the commands required to
 	 * eventually emit this request. This is to guarantee that the
@@ -2781,24 +3023,20 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
 	 * to be redone if the request is not actually submitted straight
 	 * away, e.g. because a GPU scheduler has deferred it.
 	 */
+	req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
+
 	if (i915.enable_execlists)
-		ret = intel_logical_ring_reserve_space(req);
+		ret = intel_logical_ring_alloc_request_extras(req);
 	else
-		ret = intel_ring_reserve_space(req);
-	if (ret) {
-		/*
-		 * At this point, the request is fully allocated even if not
-		 * fully prepared. Thus it can be cleaned up using the proper
-		 * free code.
-		 */
-		intel_ring_reserved_space_cancel(req->ringbuf);
-		i915_gem_request_unreference(req);
-		return ret;
-	}
+		ret = intel_ring_alloc_request_extras(req);
+	if (ret)
+		goto err_ctx;
 
 	*req_out = req;
 	return 0;
 
+err_ctx:
+	i915_gem_context_unreference(ctx);
 err:
 	kmem_cache_free(dev_priv->requests, req);
 	return ret;
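[Editor's note] The reworked error path unwinds only what has already been acquired: a failure after the context reference was taken lands on err_ctx, an earlier failure on err. A compilable toy of that goto-unwind shape (all names hypothetical):

#include <stdio.h>

static int take_ref(void)	{ puts("ref taken");	return 0; }
static void drop_ref(void)	{ puts("ref dropped");	}
static int reserve_space(void)	{ puts("reserve fails"); return -1; }

static int create_request(void)
{
	int ret;

	ret = take_ref();
	if (ret)
		goto err;

	ret = reserve_space();
	if (ret)
		goto err_ref;	/* undo only the step that succeeded */

	return 0;

err_ref:
	drop_ref();
err:
	return ret;
}

int main(void) { return create_request() ? 1 : 0; }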
@@ -2818,13 +3056,13 @@ err:
  */
 struct drm_i915_gem_request *
 i915_gem_request_alloc(struct intel_engine_cs *engine,
-		       struct intel_context *ctx)
+		       struct i915_gem_context *ctx)
 {
 	struct drm_i915_gem_request *req;
 	int err;
 
 	if (ctx == NULL)
-		ctx = to_i915(engine->dev)->kernel_context;
+		ctx = engine->i915->kernel_context;
 	err = __i915_gem_request_alloc(engine, ctx, &req);
 	return err ? ERR_PTR(err) : req;
 }
@@ -2888,13 +3126,7 @@ static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
 		/* Ensure irq handler finishes or is cancelled. */
 		tasklet_kill(&engine->irq_tasklet);
 
-		spin_lock_bh(&engine->execlist_lock);
-		/* list_splice_tail_init checks for empty lists */
-		list_splice_tail_init(&engine->execlist_queue,
-				      &engine->execlist_retired_req_list);
-		spin_unlock_bh(&engine->execlist_lock);
-
-		intel_execlists_retire_requests(engine);
+		intel_execlists_cancel_requests(engine);
 	}
 
 	/*
@@ -2954,6 +3186,7 @@ void i915_gem_reset(struct drm_device *dev)
 
 /**
  * This function clears the request list as sequence numbers are passed.
+ * @engine: engine to retire requests on
  */
 void
 i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
@@ -3005,9 +3238,8 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
 }
 
 bool
-i915_gem_retire_requests(struct drm_device *dev)
+i915_gem_retire_requests(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *engine;
 	bool idle = true;
 
@@ -3018,8 +3250,6 @@ i915_gem_retire_requests(struct drm_device *dev)
 			spin_lock_bh(&engine->execlist_lock);
 			idle &= list_empty(&engine->execlist_queue);
 			spin_unlock_bh(&engine->execlist_lock);
-
-			intel_execlists_retire_requests(engine);
 		}
 	}
 
@@ -3042,7 +3272,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
 	/* Come back later if the device is busy... */
 	idle = false;
 	if (mutex_trylock(&dev->struct_mutex)) {
-		idle = i915_gem_retire_requests(dev);
+		idle = i915_gem_retire_requests(dev_priv);
 		mutex_unlock(&dev->struct_mutex);
 	}
 	if (!idle)
@@ -3066,7 +3296,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
 	 * Also locking seems to be fubar here, engine->request_list is protected
 	 * by dev->struct_mutex. */
 
-	intel_mark_idle(dev);
+	intel_mark_idle(dev_priv);
 
 	if (mutex_trylock(&dev->struct_mutex)) {
 		for_each_engine(engine, dev_priv)
@@ -3080,6 +3310,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
  * Ensures that an object will eventually get non-busy by flushing any required
  * write domains, emitting any outstanding lazy request and retiring and
  * completed requests.
+ * @obj: object to flush
  */
 static int
 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
@@ -3096,14 +3327,8 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
 		if (req == NULL)
 			continue;
 
-		if (list_empty(&req->list))
-			goto retire;
-
-		if (i915_gem_request_completed(req, true)) {
-			__i915_gem_request_retire__upto(req);
-retire:
+		if (i915_gem_request_completed(req, true))
 			i915_gem_object_retire__read(obj, i);
-		}
 	}
 
 	return 0;
@@ -3111,7 +3336,9 @@ retire:
 
 /**
  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
- * @DRM_IOCTL_ARGS: standard ioctl arguments
+ * @dev: drm device pointer
+ * @data: ioctl data blob
+ * @file: drm file pointer
  *
  * Returns 0 if successful, else an error is returned with the remaining time in
  * the timeout parameter.
@@ -3185,7 +3412,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 		ret = __i915_wait_request(req[i], true,
 					  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
 					  to_rps_client(file));
-		i915_gem_request_unreference__unlocked(req[i]);
+		i915_gem_request_unreference(req[i]);
 	}
 	return ret;
 
@@ -3211,7 +3438,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
 	if (i915_gem_request_completed(from_req, true))
 		return 0;
 
-	if (!i915_semaphore_is_enabled(obj->base.dev)) {
+	if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) {
 		struct drm_i915_private *i915 = to_i915(obj->base.dev);
 		ret = __i915_wait_request(from_req,
 					  i915->mm.interruptible,
@@ -3345,6 +3572,17 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
 					    old_write_domain);
 }
 
+static void __i915_vma_iounmap(struct i915_vma *vma)
+{
+	GEM_BUG_ON(vma->pin_count);
+
+	if (vma->iomap == NULL)
+		return;
+
+	io_mapping_unmap(vma->iomap);
+	vma->iomap = NULL;
+}
+
 static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
@@ -3377,6 +3615,8 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
 		ret = i915_gem_object_put_fence(obj);
 		if (ret)
 			return ret;
+
+		__i915_vma_iounmap(vma);
 	}
 
 	trace_i915_vma_unbind(vma);
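[Editor's note] __i915_vma_iounmap() is deliberately idempotent: it does nothing when no mapping exists and clears the pointer afterwards, so repeated unbinds stay safe. The same shape in a standalone form:

#include <stdlib.h>

struct mapping { void *addr; };

/* Safe to call whether or not a mapping exists, and safe to call twice. */
static void mapping_release(struct mapping *m)
{
	if (m->addr == NULL)
		return;

	free(m->addr);
	m->addr = NULL;
}

int main(void)
{
	struct mapping m = { .addr = malloc(64) };

	mapping_release(&m);
	mapping_release(&m);	/* second call is a no-op */
	return 0;
}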
@@ -3488,6 +3728,11 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
 /**
  * Finds free space in the GTT aperture and binds the object or a view of it
  * there.
+ * @obj: object to bind
+ * @vm: address space to bind into
+ * @ggtt_view: global gtt view if applicable
+ * @alignment: requested alignment
+ * @flags: mask of PIN_* flags to use
  */
 static struct i915_vma *
 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
@@ -3731,7 +3976,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
 		return;
 
 	if (i915_gem_clflush_object(obj, obj->pin_display))
-		i915_gem_chipset_flush(obj->base.dev);
+		i915_gem_chipset_flush(to_i915(obj->base.dev));
 
 	old_write_domain = obj->base.write_domain;
 	obj->base.write_domain = 0;
@@ -3745,6 +3990,8 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
 
 /**
  * Moves a single object to the GTT read, and possibly write domain.
+ * @obj: object to act on
+ * @write: ask for write access or read only
  *
  * This function returns when the move is complete, including waiting on
  * flushes to occur.
@@ -3816,6 +4063,8 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 
 /**
  * Changes the cache-level of an object across all VMA.
+ * @obj: object to act on
+ * @cache_level: new cache level to set for the object
  *
  * After this function returns, the object will be in the new cache-level
  * across all GTT and the contents of the backing storage will be coherent,
@@ -3925,11 +4174,9 @@ out:
 	 * object is now coherent at its new cache level (with respect
 	 * to the access domain).
 	 */
-	if (obj->cache_dirty &&
-	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
-	    cpu_write_needs_clflush(obj)) {
+	if (obj->cache_dirty && cpu_write_needs_clflush(obj)) {
 		if (i915_gem_clflush_object(obj, true))
-			i915_gem_chipset_flush(obj->base.dev);
+			i915_gem_chipset_flush(to_i915(obj->base.dev));
 	}
 
 	return 0;
@@ -4097,6 +4344,8 @@ i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
 
 /**
  * Moves a single object to the CPU read, and possibly write domain.
+ * @obj: object to act on
+ * @write: requesting write or read-only access
  *
  * This function returns when the move is complete, including waiting on
  * flushes to occur.
@@ -4198,7 +4447,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	if (ret == 0)
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
-	i915_gem_request_unreference__unlocked(target);
+	i915_gem_request_unreference(target);
 
 	return ret;
 }
@@ -4499,21 +4748,21 @@ static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
 	.put_pages = i915_gem_object_put_pages_gtt,
 };
 
-struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
-						  size_t size)
+struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
+						   size_t size)
 {
 	struct drm_i915_gem_object *obj;
 	struct address_space *mapping;
 	gfp_t mask;
+	int ret;
 
 	obj = i915_gem_object_alloc(dev);
 	if (obj == NULL)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
-	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
-		i915_gem_object_free(obj);
-		return NULL;
-	}
+	ret = drm_gem_object_init(dev, &obj->base, size);
+	if (ret)
+		goto fail;
 
 	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
 	if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
@@ -4550,6 +4799,11 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 	trace_i915_gem_object_create(obj);
 
 	return obj;
+
+fail:
+	i915_gem_object_free(obj);
+
+	return ERR_PTR(ret);
 }
 
 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
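[Editor's note] With the rename to i915_gem_object_create(), failure is reported through the pointer itself rather than NULL, so callers switch from NULL checks to the kernel's ERR_PTR convention (the batch-pool hunk below shows one such caller). A userspace re-creation of that convention:

#include <stdio.h>
#include <errno.h>

static inline void *ERR_PTR(long error)     { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* the top 4095 addresses encode -MAX_ERRNO..-1 */
	return (unsigned long)ptr >= (unsigned long)-4095;
}

static void *create_object(int fail)
{
	return fail ? ERR_PTR(-ENOMEM) : (void *)"object";
}

int main(void)
{
	void *obj = create_object(1);

	if (IS_ERR(obj))
		printf("error %ld\n", PTR_ERR(obj));	/* error -12 */
	return 0;
}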
@@ -4655,16 +4909,12 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
 struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
 					   const struct i915_ggtt_view *view)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct i915_vma *vma;
 
-	BUG_ON(!view);
+	GEM_BUG_ON(!view);
 
 	list_for_each_entry(vma, &obj->vma_list, obj_link)
-		if (vma->vm == &ggtt->base &&
-		    i915_ggtt_view_equal(&vma->ggtt_view, view))
+		if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
 			return vma;
 	return NULL;
 }
@@ -4706,9 +4956,10 @@ i915_gem_suspend(struct drm_device *dev)
 	if (ret)
 		goto err;
 
-	i915_gem_retire_requests(dev);
+	i915_gem_retire_requests(dev_priv);
 
 	i915_gem_stop_engines(dev);
+	i915_gem_context_lost(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
 
 	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
@@ -4727,37 +4978,6 @@ err:
 	return ret;
 }
 
-int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
-{
-	struct intel_engine_cs *engine = req->engine;
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
-	int i, ret;
-
-	if (!HAS_L3_DPF(dev) || !remap_info)
-		return 0;
-
-	ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3);
-	if (ret)
-		return ret;
-
-	/*
-	 * Note: We do not worry about the concurrent register cacheline hang
-	 * here because no other code should access these registers other than
-	 * at initialization time.
-	 */
-	for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) {
-		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
-		intel_ring_emit(engine, remap_info[i]);
-	}
-
-	intel_ring_advance(engine);
-
-	return ret;
-}
-
 void i915_gem_init_swizzling(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4862,7 +5082,7 @@ i915_gem_init_hw(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *engine;
-	int ret, j;
+	int ret;
 
 	/* Double layer security blanket, see i915_gem_init() */
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -4914,58 +5134,15 @@ i915_gem_init_hw(struct drm_device *dev)
 	intel_mocs_init_l3cc_table(dev);
 
 	/* We can't enable contexts until all firmware is loaded */
-	if (HAS_GUC_UCODE(dev)) {
-		ret = intel_guc_ucode_load(dev);
-		if (ret) {
-			DRM_ERROR("Failed to initialize GuC, error %d\n", ret);
-			ret = -EIO;
-			goto out;
-		}
-	}
+	ret = intel_guc_setup(dev);
+	if (ret)
+		goto out;
 
 	/*
 	 * Increment the next seqno by 0x100 so we have a visible break
 	 * on re-initialisation
 	 */
 	ret = i915_gem_set_seqno(dev, dev_priv->next_seqno+0x100);
-	if (ret)
-		goto out;
-
-	/* Now it is safe to go back round and do everything else: */
-	for_each_engine(engine, dev_priv) {
-		struct drm_i915_gem_request *req;
-
-		req = i915_gem_request_alloc(engine, NULL);
-		if (IS_ERR(req)) {
-			ret = PTR_ERR(req);
-			break;
-		}
-
-		if (engine->id == RCS) {
-			for (j = 0; j < NUM_L3_SLICES(dev); j++) {
-				ret = i915_gem_l3_remap(req, j);
-				if (ret)
-					goto err_request;
-			}
-		}
-
-		ret = i915_ppgtt_init_ring(req);
-		if (ret)
-			goto err_request;
-
-		ret = i915_gem_context_enable(req);
-		if (ret)
-			goto err_request;
-
-err_request:
-		i915_add_request_no_flush(req);
-		if (ret) {
-			DRM_ERROR("Failed to enable %s, error=%d\n",
-				  engine->name, ret);
-			i915_gem_cleanup_engines(dev);
-			break;
-		}
-	}
 
 out:
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -4977,9 +5154,6 @@ int i915_gem_init(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
-	i915.enable_execlists = intel_sanitize_enable_execlists(dev,
-			i915.enable_execlists);
-
 	mutex_lock(&dev->struct_mutex);
 
 	if (!i915.enable_execlists) {
@@ -5002,10 +5176,7 @@ int i915_gem_init(struct drm_device *dev)
 	 */
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
-	ret = i915_gem_init_userptr(dev);
-	if (ret)
-		goto out_unlock;
-
+	i915_gem_init_userptr(dev_priv);
 	i915_gem_init_ggtt(dev);
 
 	ret = i915_gem_context_init(dev);
@@ -5042,14 +5213,6 @@ i915_gem_cleanup_engines(struct drm_device *dev)
 
 	for_each_engine(engine, dev_priv)
 		dev_priv->gt.cleanup_engine(engine);
-
-	if (i915.enable_execlists)
-		/*
-		 * Neither the BIOS, ourselves or any other kernel
-		 * expects the system to be in execlists mode on startup,
-		 * so we need to reset the GPU back to legacy mode.
-		 */
-		intel_gpu_reset(dev, ALL_ENGINES);
 }
 
 static void
@@ -5073,7 +5236,7 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
 	else
 		dev_priv->num_fence_regs = 8;
 
-	if (intel_vgpu_active(dev))
+	if (intel_vgpu_active(dev_priv))
 		dev_priv->num_fence_regs =
 				I915_READ(vgtif_reg(avail_rs.fence_num));
 
@@ -5148,6 +5311,34 @@ void i915_gem_load_cleanup(struct drm_device *dev)
 	kmem_cache_destroy(dev_priv->objects);
 }
 
+int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
+{
+	struct drm_i915_gem_object *obj;
+
+	/* Called just before we write the hibernation image.
+	 *
+	 * We need to update the domain tracking to reflect that the CPU
+	 * will be accessing all the pages to create and restore from the
+	 * hibernation, and so upon restoration those pages will be in the
+	 * CPU domain.
+	 *
+	 * To make sure the hibernation image contains the latest state,
+	 * we update that state just before writing out the image.
+	 */
+
+	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+	}
+
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+	}
+
+	return 0;
+}
+
 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
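[Editor's note] i915_gem_freeze_late() is meant to run after everything is idle, immediately before the hibernation image is written. The call site is not visible in this diff; a plausible wiring would be a freeze_late PM callback along these lines (all names assumed for illustration):

/* Hypothetical caller; not part of this diff. */
static int i915_pm_freeze_late(struct device *kdev)
{
	struct drm_i915_private *dev_priv = kdev_to_i915(kdev); /* assumed helper */

	return i915_gem_freeze_late(dev_priv);
}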
@@ -5254,13 +5445,10 @@ u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
 u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
 				  const struct i915_ggtt_view *view)
 {
-	struct drm_i915_private *dev_priv = to_i915(o->base.dev);
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, &o->vma_list, obj_link)
-		if (vma->vm == &ggtt->base &&
-		    i915_ggtt_view_equal(&vma->ggtt_view, view))
+		if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
 			return vma->node.start;
 
 	WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
@@ -5286,12 +5474,10 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
 bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
 				  const struct i915_ggtt_view *view)
 {
-	struct drm_i915_private *dev_priv = to_i915(o->base.dev);
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, &o->vma_list, obj_link)
-		if (vma->vm == &ggtt->base &&
+		if (vma->is_ggtt &&
 		    i915_ggtt_view_equal(&vma->ggtt_view, view) &&
 		    drm_mm_node_allocated(&vma->node))
 			return true;
@@ -5310,23 +5496,18 @@ bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
 	return false;
 }
 
-unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
-				struct i915_address_space *vm)
+unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
 {
-	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
 	struct i915_vma *vma;
 
-	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
-
-	BUG_ON(list_empty(&o->vma_list));
+	GEM_BUG_ON(list_empty(&o->vma_list));
 
 	list_for_each_entry(vma, &o->vma_list, obj_link) {
 		if (vma->is_ggtt &&
-		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
-			continue;
-		if (vma->vm == vm)
+		    vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
 			return vma->node.size;
 	}
+
 	return 0;
 }
 
@@ -5365,8 +5546,8 @@ i915_gem_object_create_from_data(struct drm_device *dev,
 	size_t bytes;
 	int ret;
 
-	obj = i915_gem_alloc_object(dev, round_up(size, PAGE_SIZE));
-	if (IS_ERR_OR_NULL(obj))
+	obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
+	if (IS_ERR(obj))
 		return obj;
 
 	ret = i915_gem_object_set_to_cpu_domain(obj, true);
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
index 7bf2f3f2968e..3752d5daa4b2 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -134,9 +134,9 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
 	if (obj == NULL) {
 		int ret;
 
-		obj = i915_gem_alloc_object(pool->dev, size);
-		if (obj == NULL)
-			return ERR_PTR(-ENOMEM);
+		obj = i915_gem_object_create(pool->dev, size);
+		if (IS_ERR(obj))
+			return obj;
 
 		ret = i915_gem_object_get_pages(obj);
 		if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index e5acc3916f75..30d9b4fd30f3 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -90,6 +90,8 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 
+#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
+
 /* This is a HW constraint. The value below is the largest known requirement
  * I've seen in a spec to date, and that was a workaround for a non-shipping
  * part. It should be safe to decrease this, but it's more future proof as is.
@@ -97,28 +99,27 @@
 #define GEN6_CONTEXT_ALIGN (64<<10)
 #define GEN7_CONTEXT_ALIGN 4096
 
-static size_t get_context_alignment(struct drm_device *dev)
+static size_t get_context_alignment(struct drm_i915_private *dev_priv)
 {
-	if (IS_GEN6(dev))
+	if (IS_GEN6(dev_priv))
 		return GEN6_CONTEXT_ALIGN;
 
 	return GEN7_CONTEXT_ALIGN;
 }
 
-static int get_context_size(struct drm_device *dev)
+static int get_context_size(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 	u32 reg;
 
-	switch (INTEL_INFO(dev)->gen) {
+	switch (INTEL_GEN(dev_priv)) {
 	case 6:
 		reg = I915_READ(CXT_SIZE);
 		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
 		break;
 	case 7:
 		reg = I915_READ(GEN7_CXT_SIZE);
-		if (IS_HASWELL(dev))
+		if (IS_HASWELL(dev_priv))
 			ret = HSW_CXT_TOTAL_SIZE;
 		else
 			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
@@ -133,7 +134,7 @@ static int get_context_size(struct drm_device *dev)
 	return ret;
 }
 
-static void i915_gem_context_clean(struct intel_context *ctx)
+static void i915_gem_context_clean(struct i915_gem_context *ctx)
 {
 	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
 	struct i915_vma *vma, *next;
@@ -150,13 +151,12 @@ static void i915_gem_context_clean(struct intel_context *ctx)
 
 void i915_gem_context_free(struct kref *ctx_ref)
 {
-	struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
+	struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
+	int i;
 
+	lockdep_assert_held(&ctx->i915->dev->struct_mutex);
 	trace_i915_context_free(ctx);
 
-	if (i915.enable_execlists)
-		intel_lr_context_free(ctx);
-
 	/*
 	 * This context is going away and we need to remove all VMAs still
 	 * around. This is to handle imported shared objects for which
@@ -166,9 +166,22 @@ void i915_gem_context_free(struct kref *ctx_ref)
 
 	i915_ppgtt_put(ctx->ppgtt);
 
-	if (ctx->legacy_hw_ctx.rcs_state)
-		drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
+	for (i = 0; i < I915_NUM_ENGINES; i++) {
+		struct intel_context *ce = &ctx->engine[i];
+
+		if (!ce->state)
+			continue;
+
+		WARN_ON(ce->pin_count);
+		if (ce->ringbuf)
+			intel_ringbuffer_free(ce->ringbuf);
+
+		drm_gem_object_unreference(&ce->state->base);
+	}
+
 	list_del(&ctx->link);
+
+	ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id);
 	kfree(ctx);
 }
 
@@ -178,9 +191,11 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
 	struct drm_i915_gem_object *obj;
 	int ret;
 
-	obj = i915_gem_alloc_object(dev, size);
-	if (obj == NULL)
-		return ERR_PTR(-ENOMEM);
+	lockdep_assert_held(&dev->struct_mutex);
+
+	obj = i915_gem_object_create(dev, size);
+	if (IS_ERR(obj))
+		return obj;
 
 	/*
 	 * Try to make the context utilize L3 as well as LLC.
@@ -209,18 +224,46 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
 	return obj;
 }
 
-static struct intel_context *
+static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
+{
+	int ret;
+
+	ret = ida_simple_get(&dev_priv->context_hw_ida,
+			     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
+	if (ret < 0) {
+		/* Contexts are only released when no longer active.
+		 * Flush any pending retires to hopefully release some
+		 * stale contexts and try again.
+		 */
+		i915_gem_retire_requests(dev_priv);
+		ret = ida_simple_get(&dev_priv->context_hw_ida,
+				     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
+		if (ret < 0)
+			return ret;
+	}
+
+	*out = ret;
+	return 0;
+}
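[Editor's note] assign_hw_id() tries the cheap path first and only retires outstanding requests when the ida space is exhausted, then retries once. The retry-after-reclaim shape in a standalone toy:

#include <stdio.h>

static int ids_free;

static int ida_get(void) { return ids_free-- > 0 ? 7 : -1; }
static void retire(void) { ids_free = 1; /* pretend an id was released */ }

static int assign_id(unsigned *out)
{
	int ret = ida_get();

	if (ret < 0) {
		retire();	/* reclaim, then try again once */
		ret = ida_get();
		if (ret < 0)
			return ret;
	}

	*out = ret;
	return 0;
}

int main(void)
{
	unsigned id;

	printf("assign_id -> %d\n", assign_id(&id));	/* 0: reclaim worked */
	return 0;
}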
+
+static struct i915_gem_context *
 __create_hw_context(struct drm_device *dev,
 		    struct drm_i915_file_private *file_priv)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_context *ctx;
+	struct i915_gem_context *ctx;
 	int ret;
 
 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 	if (ctx == NULL)
 		return ERR_PTR(-ENOMEM);
 
+	ret = assign_hw_id(dev_priv, &ctx->hw_id);
+	if (ret) {
+		kfree(ctx);
+		return ERR_PTR(ret);
+	}
+
 	kref_init(&ctx->ref);
 	list_add_tail(&ctx->link, &dev_priv->context_list);
 	ctx->i915 = dev_priv;
@@ -232,7 +275,7 @@ __create_hw_context(struct drm_device *dev,
 		ret = PTR_ERR(obj);
 		goto err_out;
 		}
-		ctx->legacy_hw_ctx.rcs_state = obj;
+		ctx->engine[RCS].state = obj;
 	}
 
 	/* Default context will never have a file_priv */
@@ -249,9 +292,13 @@ __create_hw_context(struct drm_device *dev,
 	/* NB: Mark all slices as needing a remap so that when the context first
 	 * loads it will restore whatever remap state already exists. If there
 	 * is no remap info, it will be a NOP. */
-	ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;
+	ctx->remap_slice = ALL_L3_SLICES(dev_priv);
 
 	ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;
+	ctx->ring_size = 4 * PAGE_SIZE;
+	ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) <<
+			     GEN8_CTX_ADDRESSING_MODE_SHIFT;
+	ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier);
 
 	return ctx;
 
@@ -265,44 +312,27 @@ err_out:
  * context state of the GPU for applications that don't utilize HW contexts, as
  * well as an idle case.
  */
-static struct intel_context *
+static struct i915_gem_context *
 i915_gem_create_context(struct drm_device *dev,
 			struct drm_i915_file_private *file_priv)
 {
-	const bool is_global_default_ctx = file_priv == NULL;
-	struct intel_context *ctx;
-	int ret = 0;
+	struct i915_gem_context *ctx;
 
-	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+	lockdep_assert_held(&dev->struct_mutex);
 
 	ctx = __create_hw_context(dev, file_priv);
 	if (IS_ERR(ctx))
 		return ctx;
 
-	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
-		/* We may need to do things with the shrinker which
-		 * require us to immediately switch back to the default
-		 * context. This can cause a problem as pinning the
-		 * default context also requires GTT space which may not
-		 * be available. To avoid this we always pin the default
-		 * context.
-		 */
-		ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
-					    get_context_alignment(dev), 0);
-		if (ret) {
-			DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
-			goto err_destroy;
-		}
-	}
-
 	if (USES_FULL_PPGTT(dev)) {
 		struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);
 
-		if (IS_ERR_OR_NULL(ppgtt)) {
+		if (IS_ERR(ppgtt)) {
 			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
 					 PTR_ERR(ppgtt));
-			ret = PTR_ERR(ppgtt);
-			goto err_unpin;
+			idr_remove(&file_priv->context_idr, ctx->user_handle);
+			i915_gem_context_unreference(ctx);
+			return ERR_CAST(ppgtt);
 		}
 
 		ctx->ppgtt = ppgtt;
@@ -311,24 +341,53 @@ i915_gem_create_context(struct drm_device *dev,
 	trace_i915_context_create(ctx);
 
 	return ctx;
+}
 
-err_unpin:
-	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
-		i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
-err_destroy:
-	idr_remove(&file_priv->context_idr, ctx->user_handle);
-	i915_gem_context_unreference(ctx);
-	return ERR_PTR(ret);
+/**
+ * i915_gem_context_create_gvt - create a GVT GEM context
+ * @dev: drm device *
+ *
+ * This function is used to create a GVT specific GEM context.
+ *
+ * Returns:
+ * pointer to i915_gem_context on success, error pointer if failed
+ *
+ */
+struct i915_gem_context *
+i915_gem_context_create_gvt(struct drm_device *dev)
+{
+	struct i915_gem_context *ctx;
+	int ret;
+
+	if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
+		return ERR_PTR(-ENODEV);
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ERR_PTR(ret);
+
+	ctx = i915_gem_create_context(dev, NULL);
+	if (IS_ERR(ctx))
+		goto out;
+
+	ctx->execlists_force_single_submission = true;
+	ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
+out:
+	mutex_unlock(&dev->struct_mutex);
+	return ctx;
 }
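[Editor's note] A GVT caller would consume this in the usual ERR_PTR style; a hypothetical fragment (no such caller appears in this diff):

struct i915_gem_context *ctx;

ctx = i915_gem_context_create_gvt(dev);
if (IS_ERR(ctx))
	return PTR_ERR(ctx);
/* ... mediated submission uses ctx; single submission is enforced ... */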
 
-static void i915_gem_context_unpin(struct intel_context *ctx,
+static void i915_gem_context_unpin(struct i915_gem_context *ctx,
 				   struct intel_engine_cs *engine)
 {
 	if (i915.enable_execlists) {
 		intel_lr_context_unpin(ctx, engine);
 	} else {
-		if (engine->id == RCS && ctx->legacy_hw_ctx.rcs_state)
-			i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
+		struct intel_context *ce = &ctx->engine[engine->id];
+
+		if (ce->state)
+			i915_gem_object_ggtt_unpin(ce->state);
+
 		i915_gem_context_unreference(ctx);
 	}
 }
@@ -336,51 +395,48 @@ static void i915_gem_context_unpin(struct intel_context *ctx,
 void i915_gem_context_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int i;
+
+	lockdep_assert_held(&dev->struct_mutex);
 
 	if (i915.enable_execlists) {
-		struct intel_context *ctx;
+		struct i915_gem_context *ctx;
 
 		list_for_each_entry(ctx, &dev_priv->context_list, link)
 			intel_lr_context_reset(dev_priv, ctx);
 	}
 
-	for (i = 0; i < I915_NUM_ENGINES; i++) {
-		struct intel_engine_cs *engine = &dev_priv->engine[i];
-
-		if (engine->last_context) {
-			i915_gem_context_unpin(engine->last_context, engine);
-			engine->last_context = NULL;
-		}
-	}
-
-	/* Force the GPU state to be reinitialised on enabling */
-	dev_priv->kernel_context->legacy_hw_ctx.initialized = false;
+	i915_gem_context_lost(dev_priv);
 }
 
 int i915_gem_context_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_context *ctx;
+	struct i915_gem_context *ctx;
 
 	/* Init should only be called once per module load. Eventually the
 	 * restriction on the context_disabled check can be loosened. */
 	if (WARN_ON(dev_priv->kernel_context))
 		return 0;
 
-	if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
+	if (intel_vgpu_active(dev_priv) &&
+	    HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
 		if (!i915.enable_execlists) {
 			DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
 			return -EINVAL;
 		}
 	}
 
+	/* Using the simple ida interface, the max is limited by sizeof(int) */
+	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
+	ida_init(&dev_priv->context_hw_ida);
+
 	if (i915.enable_execlists) {
 		/* NB: intentionally left blank. We will allocate our own
 		 * backing objects as we need them, thank you very much */
 		dev_priv->hw_context_size = 0;
-	} else if (HAS_HW_CONTEXTS(dev)) {
-		dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
+	} else if (HAS_HW_CONTEXTS(dev_priv)) {
+		dev_priv->hw_context_size =
+			round_up(get_context_size(dev_priv), 4096);
 		if (dev_priv->hw_context_size > (1<<20)) {
 			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
 					 dev_priv->hw_context_size);
@@ -395,6 +451,26 @@ int i915_gem_context_init(struct drm_device *dev)
 		return PTR_ERR(ctx);
 	}
 
+	if (!i915.enable_execlists && ctx->engine[RCS].state) {
+		int ret;
+
+		/* We may need to do things with the shrinker which
+		 * require us to immediately switch back to the default
+		 * context. This can cause a problem as pinning the
+		 * default context also requires GTT space which may not
+		 * be available. To avoid this we always pin the default
+		 * context.
+		 */
+		ret = i915_gem_obj_ggtt_pin(ctx->engine[RCS].state,
+					    get_context_alignment(dev_priv), 0);
+		if (ret) {
+			DRM_ERROR("Failed to pin default global context (error %d)\n",
+				  ret);
+			i915_gem_context_unreference(ctx);
+			return ret;
+		}
+	}
+
 	dev_priv->kernel_context = ctx;
 
 	DRM_DEBUG_DRIVER("%s context support initialized\n",
@@ -403,67 +479,48 @@ int i915_gem_context_init(struct drm_device *dev)
 	return 0;
 }
 
-void i915_gem_context_fini(struct drm_device *dev)
+void i915_gem_context_lost(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_context *dctx = dev_priv->kernel_context;
-	int i;
-
-	if (dctx->legacy_hw_ctx.rcs_state) {
-		/* The only known way to stop the gpu from accessing the hw context is
-		 * to reset it. Do this as the very last operation to avoid confusing
-		 * other code, leading to spurious errors. */
-		intel_gpu_reset(dev, ALL_ENGINES);
-
-		/* When default context is created and switched to, base object refcount
-		 * will be 2 (+1 from object creation and +1 from do_switch()).
-		 * i915_gem_context_fini() will be called after gpu_idle() has switched
-		 * to default context. So we need to unreference the base object once
-		 * to offset the do_switch part, so that i915_gem_context_unreference()
-		 * can then free the base object correctly. */
-		WARN_ON(!dev_priv->engine[RCS].last_context);
-
-		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
-	}
+	struct intel_engine_cs *engine;
 
-	for (i = I915_NUM_ENGINES; --i >= 0;) {
-		struct intel_engine_cs *engine = &dev_priv->engine[i];
+	lockdep_assert_held(&dev_priv->dev->struct_mutex);
 
+	for_each_engine(engine, dev_priv) {
 		if (engine->last_context) {
 			i915_gem_context_unpin(engine->last_context, engine);
 			engine->last_context = NULL;
 		}
+
+		/* Force the GPU state to be reinitialised on enabling */
+		dev_priv->kernel_context->engine[engine->id].initialised =
+			engine->init_context == NULL;
 	}
 
-	i915_gem_context_unreference(dctx);
-	dev_priv->kernel_context = NULL;
+	/* Force the GPU state to be reinitialised on enabling */
+	dev_priv->kernel_context->remap_slice = ALL_L3_SLICES(dev_priv);
 }
 
-int i915_gem_context_enable(struct drm_i915_gem_request *req)
+void i915_gem_context_fini(struct drm_device *dev)
 {
-	struct intel_engine_cs *engine = req->engine;
-	int ret;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_gem_context *dctx = dev_priv->kernel_context;
 
-	if (i915.enable_execlists) {
-		if (engine->init_context == NULL)
-			return 0;
+	lockdep_assert_held(&dev->struct_mutex);
 
-		ret = engine->init_context(req);
-	} else
-		ret = i915_switch_context(req);
+	if (!i915.enable_execlists && dctx->engine[RCS].state)
+		i915_gem_object_ggtt_unpin(dctx->engine[RCS].state);
 
-	if (ret) {
-		DRM_ERROR("ring init context: %d\n", ret);
-		return ret;
-	}
+	i915_gem_context_unreference(dctx);
+	dev_priv->kernel_context = NULL;
 
-	return 0;
+	ida_destroy(&dev_priv->context_hw_ida);
 }
 
 static int context_idr_cleanup(int id, void *p, void *data)
 {
-	struct intel_context *ctx = p;
+	struct i915_gem_context *ctx = p;
 
+	ctx->file_priv = ERR_PTR(-EBADF);
 	i915_gem_context_unreference(ctx);
 	return 0;
 }
@@ -471,7 +528,7 @@ static int context_idr_cleanup(int id, void *p, void *data)
 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
-	struct intel_context *ctx;
+	struct i915_gem_context *ctx;
 
 	idr_init(&file_priv->context_idr);
 
@@ -491,31 +548,22 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 
+	lockdep_assert_held(&dev->struct_mutex);
+
 	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
 	idr_destroy(&file_priv->context_idr);
 }
 
-struct intel_context *
-i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
-{
-	struct intel_context *ctx;
-
-	ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id);
-	if (!ctx)
-		return ERR_PTR(-ENOENT);
-
-	return ctx;
-}
-
 static inline int
 mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 {
+	struct drm_i915_private *dev_priv = req->i915;
 	struct intel_engine_cs *engine = req->engine;
 	u32 flags = hw_flags | MI_MM_SPACE_GTT;
 	const int num_rings =
 		/* Use an extended w/a on ivb+ if signalling from other rings */
-		i915_semaphore_is_enabled(engine->dev) ?
-		hweight32(INTEL_INFO(engine->dev)->ring_mask) - 1 :
+		i915_semaphore_is_enabled(dev_priv) ?
+		hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1 :
 		0;
 	int len, ret;
 
@@ -524,21 +572,21 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	 * explicitly, so we rely on the value at ring init, stored in
 	 * itlb_before_ctx_switch.
 	 */
-	if (IS_GEN6(engine->dev)) {
+	if (IS_GEN6(dev_priv)) {
 		ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
 		if (ret)
 			return ret;
 	}
 
 	/* These flags are for resource streamer on HSW+ */
-	if (IS_HASWELL(engine->dev) || INTEL_INFO(engine->dev)->gen >= 8)
+	if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8)
 		flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
-	else if (INTEL_INFO(engine->dev)->gen < 8)
+	else if (INTEL_GEN(dev_priv) < 8)
 		flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
 
 
 	len = 4;
-	if (INTEL_INFO(engine->dev)->gen >= 7)
+	if (INTEL_GEN(dev_priv) >= 7)
 		len += 2 + (num_rings ? 4*num_rings + 6 : 0);
 
 	ret = intel_ring_begin(req, len);
@@ -546,14 +594,14 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 		return ret;
 
 	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
-	if (INTEL_INFO(engine->dev)->gen >= 7) {
+	if (INTEL_GEN(dev_priv) >= 7) {
 		intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
 
 			intel_ring_emit(engine,
 					MI_LOAD_REGISTER_IMM(num_rings));
-			for_each_engine(signaller, to_i915(engine->dev)) {
+			for_each_engine(signaller, dev_priv) {
 				if (signaller == engine)
 					continue;
 
@@ -568,7 +616,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
568 intel_ring_emit(engine, MI_NOOP); 616 intel_ring_emit(engine, MI_NOOP);
569 intel_ring_emit(engine, MI_SET_CONTEXT); 617 intel_ring_emit(engine, MI_SET_CONTEXT);
570 intel_ring_emit(engine, 618 intel_ring_emit(engine,
571 i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) | 619 i915_gem_obj_ggtt_offset(req->ctx->engine[RCS].state) |
572 flags); 620 flags);
573 /* 621 /*
574 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP 622 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
@@ -576,14 +624,14 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
576 */ 624 */
577 intel_ring_emit(engine, MI_NOOP); 625 intel_ring_emit(engine, MI_NOOP);
578 626
579 if (INTEL_INFO(engine->dev)->gen >= 7) { 627 if (INTEL_GEN(dev_priv) >= 7) {
580 if (num_rings) { 628 if (num_rings) {
581 struct intel_engine_cs *signaller; 629 struct intel_engine_cs *signaller;
582 i915_reg_t last_reg = {}; /* keep gcc quiet */ 630 i915_reg_t last_reg = {}; /* keep gcc quiet */
583 631
584 intel_ring_emit(engine, 632 intel_ring_emit(engine,
585 MI_LOAD_REGISTER_IMM(num_rings)); 633 MI_LOAD_REGISTER_IMM(num_rings));
586 for_each_engine(signaller, to_i915(engine->dev)) { 634 for_each_engine(signaller, dev_priv) {
587 if (signaller == engine) 635 if (signaller == engine)
588 continue; 636 continue;
589 637
@@ -609,45 +657,83 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
609 return ret; 657 return ret;
610} 658}
611 659
612static inline bool skip_rcs_switch(struct intel_engine_cs *engine, 660static int remap_l3(struct drm_i915_gem_request *req, int slice)
613 struct intel_context *to) 661{
662 u32 *remap_info = req->i915->l3_parity.remap_info[slice];
663 struct intel_engine_cs *engine = req->engine;
664 int i, ret;
665
666 if (!remap_info)
667 return 0;
668
669 ret = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
670 if (ret)
671 return ret;
672
673 /*
674 * Note: We do not worry about the concurrent register cacheline hang
675 * here because no other code should access these registers other than
676 * at initialization time.
677 */
678 intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
679 for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
680 intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
681 intel_ring_emit(engine, remap_info[i]);
682 }
683 intel_ring_emit(engine, MI_NOOP);
684 intel_ring_advance(engine);
685
686 return 0;
687}
688
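[Editor's note: the dword budget remap_l3() hands to intel_ring_begin() above is easy to misread. A minimal sketch of the accounting, assuming GEN7_L3LOG_SIZE is the 0x80-byte (32-register) L3 log size used elsewhere in the driver:

	/* Sketch only: ring-space accounting for the remap_l3() emission. */
	static unsigned int remap_l3_ring_dwords(void)
	{
		const unsigned int nregs = GEN7_L3LOG_SIZE / 4;	/* 32 registers */

		return 1 +		/* MI_LOAD_REGISTER_IMM(nregs) header */
		       2 * nregs +	/* one (register, value) dword pair each */
		       1;		/* trailing MI_NOOP */
	}

which reproduces the GEN7_L3LOG_SIZE/4 * 2 + 2 expression in the hunk.]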
689static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
690 struct intel_engine_cs *engine,
691 struct i915_gem_context *to)
614{ 692{
615 if (to->remap_slice) 693 if (to->remap_slice)
616 return false; 694 return false;
617 695
618 if (!to->legacy_hw_ctx.initialized) 696 if (!to->engine[RCS].initialised)
619 return false; 697 return false;
620 698
621 if (to->ppgtt && 699 if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
622 !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
623 return false; 700 return false;
624 701
625 return to == engine->last_context; 702 return to == engine->last_context;
626} 703}
627 704
628static bool 705static bool
629needs_pd_load_pre(struct intel_engine_cs *engine, struct intel_context *to) 706needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt,
707 struct intel_engine_cs *engine,
708 struct i915_gem_context *to)
630{ 709{
631 if (!to->ppgtt) 710 if (!ppgtt)
632 return false; 711 return false;
633 712
713 /* Always load the ppgtt on first use */
714 if (!engine->last_context)
715 return true;
716
717 /* Same context without new entries, skip */
634 if (engine->last_context == to && 718 if (engine->last_context == to &&
635 !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)) 719 !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
636 return false; 720 return false;
637 721
638 if (engine->id != RCS) 722 if (engine->id != RCS)
639 return true; 723 return true;
640 724
641 if (INTEL_INFO(engine->dev)->gen < 8) 725 if (INTEL_GEN(engine->i915) < 8)
642 return true; 726 return true;
643 727
644 return false; 728 return false;
645} 729}
646 730
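[Editor's note: skip_rcs_switch(), needs_pd_load_pre() and needs_pd_load_post() together form a small decision table for when PP_DCLV/PP_DIR_BASE get reloaded. A condensed restatement as a sketch; the gen8 row follows the partially elided body of needs_pd_load_post() just below:

	/*
	 * Sketch: where the page-directory load lands for a legacy switch.
	 *
	 *  case                                  pre-load   post-load
	 *  ------------------------------------  --------   ---------
	 *  no ppgtt (GGTT only)                  no         no
	 *  first context on this engine          yes        -
	 *  same context, PDs clean               no         no
	 *  non-render engine, new context        yes        -
	 *  render, gen < 8, new context          yes        -
	 *  render, gen8, MI_RESTORE_INHIBIT      no         yes
	 */
]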
647static bool 731static bool
648needs_pd_load_post(struct intel_context *to, u32 hw_flags) 732needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
733 struct i915_gem_context *to,
734 u32 hw_flags)
649{ 735{
650 if (!to->ppgtt) 736 if (!ppgtt)
651 return false; 737 return false;
652 738
653 if (!IS_GEN8(to->i915)) 739 if (!IS_GEN8(to->i915))
@@ -661,18 +747,19 @@ needs_pd_load_post(struct intel_context *to, u32 hw_flags)
661 747
662static int do_rcs_switch(struct drm_i915_gem_request *req) 748static int do_rcs_switch(struct drm_i915_gem_request *req)
663{ 749{
664 struct intel_context *to = req->ctx; 750 struct i915_gem_context *to = req->ctx;
665 struct intel_engine_cs *engine = req->engine; 751 struct intel_engine_cs *engine = req->engine;
666 struct intel_context *from; 752 struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
753 struct i915_gem_context *from;
667 u32 hw_flags; 754 u32 hw_flags;
668 int ret, i; 755 int ret, i;
669 756
670 if (skip_rcs_switch(engine, to)) 757 if (skip_rcs_switch(ppgtt, engine, to))
671 return 0; 758 return 0;
672 759
673 /* Trying to pin first makes error handling easier. */ 760 /* Trying to pin first makes error handling easier. */
674 ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state, 761 ret = i915_gem_obj_ggtt_pin(to->engine[RCS].state,
675 get_context_alignment(engine->dev), 762 get_context_alignment(engine->i915),
676 0); 763 0);
677 if (ret) 764 if (ret)
678 return ret; 765 return ret;
@@ -694,37 +781,32 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
694 * 781 *
695 * XXX: We need a real interface to do this instead of trickery. 782 * XXX: We need a real interface to do this instead of trickery.
696 */ 783 */
697 ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false); 784 ret = i915_gem_object_set_to_gtt_domain(to->engine[RCS].state, false);
698 if (ret) 785 if (ret)
699 goto unpin_out; 786 goto unpin_out;
700 787
701 if (needs_pd_load_pre(engine, to)) { 788 if (needs_pd_load_pre(ppgtt, engine, to)) {
702 /* Older GENs and non render rings still want the load first, 789 /* Older GENs and non render rings still want the load first,
703 * "PP_DCLV followed by PP_DIR_BASE register through Load 790 * "PP_DCLV followed by PP_DIR_BASE register through Load
704 * Register Immediate commands in Ring Buffer before submitting 791 * Register Immediate commands in Ring Buffer before submitting
705 * a context."*/ 792 * a context."*/
706 trace_switch_mm(engine, to); 793 trace_switch_mm(engine, to);
707 ret = to->ppgtt->switch_mm(to->ppgtt, req); 794 ret = ppgtt->switch_mm(ppgtt, req);
708 if (ret) 795 if (ret)
709 goto unpin_out; 796 goto unpin_out;
710 } 797 }
711 798
712 if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) 799 if (!to->engine[RCS].initialised || i915_gem_context_is_default(to))
713 /* NB: If we inhibit the restore, the context is not allowed to 800 /* NB: If we inhibit the restore, the context is not allowed to
714 * die because future work may end up depending on valid address 801 * die because future work may end up depending on valid address
715 * space. This means we must enforce that a page table load 802 * space. This means we must enforce that a page table load
716 * occur when this occurs. */ 803 * occur when this occurs. */
717 hw_flags = MI_RESTORE_INHIBIT; 804 hw_flags = MI_RESTORE_INHIBIT;
718 else if (to->ppgtt && 805 else if (ppgtt && intel_engine_flag(engine) & ppgtt->pd_dirty_rings)
719 intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)
720 hw_flags = MI_FORCE_RESTORE; 806 hw_flags = MI_FORCE_RESTORE;
721 else 807 else
722 hw_flags = 0; 808 hw_flags = 0;
723 809
724 /* We should never emit switch_mm more than once */
725 WARN_ON(needs_pd_load_pre(engine, to) &&
726 needs_pd_load_post(to, hw_flags));
727
728 if (to != from || (hw_flags & MI_FORCE_RESTORE)) { 810 if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
729 ret = mi_set_context(req, hw_flags); 811 ret = mi_set_context(req, hw_flags);
730 if (ret) 812 if (ret)
@@ -738,8 +820,8 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
738 * MI_SET_CONTEXT instead of when the next seqno has completed. 820 * MI_SET_CONTEXT instead of when the next seqno has completed.
739 */ 821 */
740 if (from != NULL) { 822 if (from != NULL) {
741 from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; 823 from->engine[RCS].state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
742 i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req); 824 i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->engine[RCS].state), req);
743 /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the 825 /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
744 * whole damn pipeline, we don't need to explicitly mark the 826 * whole damn pipeline, we don't need to explicitly mark the
745 * object dirty. The only exception is that the context must be 827 * object dirty. The only exception is that the context must be
@@ -747,10 +829,10 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
747 * able to defer doing this until we know the object would be 829 * able to defer doing this until we know the object would be
748 * swapped, but there is no way to do that yet. 830 * swapped, but there is no way to do that yet.
749 */ 831 */
750 from->legacy_hw_ctx.rcs_state->dirty = 1; 832 from->engine[RCS].state->dirty = 1;
751 833
752 /* obj is kept alive until the next request by its active ref */ 834 /* obj is kept alive until the next request by its active ref */
753 i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state); 835 i915_gem_object_ggtt_unpin(from->engine[RCS].state);
754 i915_gem_context_unreference(from); 836 i915_gem_context_unreference(from);
755 } 837 }
756 i915_gem_context_reference(to); 838 i915_gem_context_reference(to);
@@ -759,9 +841,9 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
759 /* GEN8 does *not* require an explicit reload if the PDPs have been 841 /* GEN8 does *not* require an explicit reload if the PDPs have been
760 * setup, and we do not wish to move them. 842 * setup, and we do not wish to move them.
761 */ 843 */
762 if (needs_pd_load_post(to, hw_flags)) { 844 if (needs_pd_load_post(ppgtt, to, hw_flags)) {
763 trace_switch_mm(engine, to); 845 trace_switch_mm(engine, to);
764 ret = to->ppgtt->switch_mm(to->ppgtt, req); 846 ret = ppgtt->switch_mm(ppgtt, req);
765 /* The hardware context switch is emitted, but we haven't 847 /* The hardware context switch is emitted, but we haven't
766 * actually changed the state - so it's probably safe to bail 848 * actually changed the state - so it's probably safe to bail
767 * here. Still, let the user know something dangerous has 849 * here. Still, let the user know something dangerous has
@@ -771,33 +853,33 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
771 return ret; 853 return ret;
772 } 854 }
773 855
774 if (to->ppgtt) 856 if (ppgtt)
775 to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine); 857 ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
776 858
777 for (i = 0; i < MAX_L3_SLICES; i++) { 859 for (i = 0; i < MAX_L3_SLICES; i++) {
778 if (!(to->remap_slice & (1<<i))) 860 if (!(to->remap_slice & (1<<i)))
779 continue; 861 continue;
780 862
781 ret = i915_gem_l3_remap(req, i); 863 ret = remap_l3(req, i);
782 if (ret) 864 if (ret)
783 return ret; 865 return ret;
784 866
785 to->remap_slice &= ~(1<<i); 867 to->remap_slice &= ~(1<<i);
786 } 868 }
787 869
788 if (!to->legacy_hw_ctx.initialized) { 870 if (!to->engine[RCS].initialised) {
789 if (engine->init_context) { 871 if (engine->init_context) {
790 ret = engine->init_context(req); 872 ret = engine->init_context(req);
791 if (ret) 873 if (ret)
792 return ret; 874 return ret;
793 } 875 }
794 to->legacy_hw_ctx.initialized = true; 876 to->engine[RCS].initialised = true;
795 } 877 }
796 878
797 return 0; 879 return 0;
798 880
799unpin_out: 881unpin_out:
800 i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state); 882 i915_gem_object_ggtt_unpin(to->engine[RCS].state);
801 return ret; 883 return ret;
802} 884}
803 885
@@ -817,25 +899,24 @@ unpin_out:
817int i915_switch_context(struct drm_i915_gem_request *req) 899int i915_switch_context(struct drm_i915_gem_request *req)
818{ 900{
819 struct intel_engine_cs *engine = req->engine; 901 struct intel_engine_cs *engine = req->engine;
820 struct drm_i915_private *dev_priv = req->i915;
821 902
822 WARN_ON(i915.enable_execlists); 903 WARN_ON(i915.enable_execlists);
823 WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); 904 lockdep_assert_held(&req->i915->dev->struct_mutex);
824 905
825 if (engine->id != RCS || 906 if (!req->ctx->engine[engine->id].state) {
826 req->ctx->legacy_hw_ctx.rcs_state == NULL) { 907 struct i915_gem_context *to = req->ctx;
827 struct intel_context *to = req->ctx; 908 struct i915_hw_ppgtt *ppgtt =
909 to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
828 910
829 if (needs_pd_load_pre(engine, to)) { 911 if (needs_pd_load_pre(ppgtt, engine, to)) {
830 int ret; 912 int ret;
831 913
832 trace_switch_mm(engine, to); 914 trace_switch_mm(engine, to);
833 ret = to->ppgtt->switch_mm(to->ppgtt, req); 915 ret = ppgtt->switch_mm(ppgtt, req);
834 if (ret) 916 if (ret)
835 return ret; 917 return ret;
836 918
837 /* Doing a PD load always reloads the page dirs */ 919 ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
838 to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
839 } 920 }
840 921
841 if (to != engine->last_context) { 922 if (to != engine->last_context) {
@@ -861,7 +942,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
861{ 942{
862 struct drm_i915_gem_context_create *args = data; 943 struct drm_i915_gem_context_create *args = data;
863 struct drm_i915_file_private *file_priv = file->driver_priv; 944 struct drm_i915_file_private *file_priv = file->driver_priv;
864 struct intel_context *ctx; 945 struct i915_gem_context *ctx;
865 int ret; 946 int ret;
866 947
867 if (!contexts_enabled(dev)) 948 if (!contexts_enabled(dev))
@@ -890,7 +971,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
890{ 971{
891 struct drm_i915_gem_context_destroy *args = data; 972 struct drm_i915_gem_context_destroy *args = data;
892 struct drm_i915_file_private *file_priv = file->driver_priv; 973 struct drm_i915_file_private *file_priv = file->driver_priv;
893 struct intel_context *ctx; 974 struct i915_gem_context *ctx;
894 int ret; 975 int ret;
895 976
896 if (args->pad != 0) 977 if (args->pad != 0)
@@ -903,13 +984,13 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
903 if (ret) 984 if (ret)
904 return ret; 985 return ret;
905 986
906 ctx = i915_gem_context_get(file_priv, args->ctx_id); 987 ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
907 if (IS_ERR(ctx)) { 988 if (IS_ERR(ctx)) {
908 mutex_unlock(&dev->struct_mutex); 989 mutex_unlock(&dev->struct_mutex);
909 return PTR_ERR(ctx); 990 return PTR_ERR(ctx);
910 } 991 }
911 992
912 idr_remove(&ctx->file_priv->context_idr, ctx->user_handle); 993 idr_remove(&file_priv->context_idr, ctx->user_handle);
913 i915_gem_context_unreference(ctx); 994 i915_gem_context_unreference(ctx);
914 mutex_unlock(&dev->struct_mutex); 995 mutex_unlock(&dev->struct_mutex);
915 996
@@ -922,14 +1003,14 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
922{ 1003{
923 struct drm_i915_file_private *file_priv = file->driver_priv; 1004 struct drm_i915_file_private *file_priv = file->driver_priv;
924 struct drm_i915_gem_context_param *args = data; 1005 struct drm_i915_gem_context_param *args = data;
925 struct intel_context *ctx; 1006 struct i915_gem_context *ctx;
926 int ret; 1007 int ret;
927 1008
928 ret = i915_mutex_lock_interruptible(dev); 1009 ret = i915_mutex_lock_interruptible(dev);
929 if (ret) 1010 if (ret)
930 return ret; 1011 return ret;
931 1012
932 ctx = i915_gem_context_get(file_priv, args->ctx_id); 1013 ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
933 if (IS_ERR(ctx)) { 1014 if (IS_ERR(ctx)) {
934 mutex_unlock(&dev->struct_mutex); 1015 mutex_unlock(&dev->struct_mutex);
935 return PTR_ERR(ctx); 1016 return PTR_ERR(ctx);
@@ -965,14 +1046,14 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
965{ 1046{
966 struct drm_i915_file_private *file_priv = file->driver_priv; 1047 struct drm_i915_file_private *file_priv = file->driver_priv;
967 struct drm_i915_gem_context_param *args = data; 1048 struct drm_i915_gem_context_param *args = data;
968 struct intel_context *ctx; 1049 struct i915_gem_context *ctx;
969 int ret; 1050 int ret;
970 1051
971 ret = i915_mutex_lock_interruptible(dev); 1052 ret = i915_mutex_lock_interruptible(dev);
972 if (ret) 1053 if (ret)
973 return ret; 1054 return ret;
974 1055
975 ctx = i915_gem_context_get(file_priv, args->ctx_id); 1056 ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
976 if (IS_ERR(ctx)) { 1057 if (IS_ERR(ctx)) {
977 mutex_unlock(&dev->struct_mutex); 1058 mutex_unlock(&dev->struct_mutex);
978 return PTR_ERR(ctx); 1059 return PTR_ERR(ctx);
@@ -1004,3 +1085,42 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
1004 1085
1005 return ret; 1086 return ret;
1006} 1087}
1088
1089int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
1090 void *data, struct drm_file *file)
1091{
1092 struct drm_i915_private *dev_priv = dev->dev_private;
1093 struct drm_i915_reset_stats *args = data;
1094 struct i915_ctx_hang_stats *hs;
1095 struct i915_gem_context *ctx;
1096 int ret;
1097
1098 if (args->flags || args->pad)
1099 return -EINVAL;
1100
1101 if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
1102 return -EPERM;
1103
1104 ret = i915_mutex_lock_interruptible(dev);
1105 if (ret)
1106 return ret;
1107
1108 ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
1109 if (IS_ERR(ctx)) {
1110 mutex_unlock(&dev->struct_mutex);
1111 return PTR_ERR(ctx);
1112 }
1113 hs = &ctx->hang_stats;
1114
1115 if (capable(CAP_SYS_ADMIN))
1116 args->reset_count = i915_reset_count(&dev_priv->gpu_error);
1117 else
1118 args->reset_count = 0;
1119
1120 args->batch_active = hs->batch_active;
1121 args->batch_pending = hs->batch_pending;
1122
1123 mutex_unlock(&dev->struct_mutex);
1124
1125 return 0;
1126}
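[Editor's note: userspace reaches this handler through DRM_IOCTL_I915_GET_RESET_STATS. A hedged usage sketch, assuming the uapi header is visible as <drm/i915_drm.h>; error handling is trimmed and fd is an open i915 node:

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	/* Query per-context hang statistics. reset_count is only filled in
	 * for CAP_SYS_ADMIN callers, matching the kernel check above. */
	static int query_reset_stats(int fd, unsigned int ctx_id)
	{
		struct drm_i915_reset_stats stats = { .ctx_id = ctx_id };

		if (ioctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats))
			return -1;

		printf("ctx %u: active %u pending %u (global resets: %u)\n",
		       ctx_id, stats.batch_active, stats.batch_pending,
		       stats.reset_count);
		return 0;
	}
]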
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.h b/drivers/gpu/drm/i915/i915_gem_dmabuf.h
new file mode 100644
index 000000000000..91315557e421
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.h
@@ -0,0 +1,45 @@
1/*
2 * Copyright 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 */
24
25#ifndef _I915_GEM_DMABUF_H_
26#define _I915_GEM_DMABUF_H_
27
28#include <linux/dma-buf.h>
29
30static inline struct reservation_object *
31i915_gem_object_get_dmabuf_resv(struct drm_i915_gem_object *obj)
32{
33 struct dma_buf *dma_buf;
34
35 if (obj->base.dma_buf)
36 dma_buf = obj->base.dma_buf;
37 else if (obj->base.import_attach)
38 dma_buf = obj->base.import_attach->dmabuf;
39 else
40 return NULL;
41
42 return dma_buf->resv;
43}
44
45#endif
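[Editor's note: a hedged sketch of how a caller might use the new helper, e.g. to poll whether a shared buffer is idle without taking the reservation lock; reservation_object_test_signaled_rcu() is assumed available, as in kernels of this vintage:

	/* Sketch: true if obj has no dma-buf reservation, or if every fence
	 * attached to it has already signalled (lockless RCU read). */
	static bool i915_gem_object_dmabuf_is_idle(struct drm_i915_gem_object *obj)
	{
		struct reservation_object *resv =
			i915_gem_object_get_dmabuf_resv(obj);

		return !resv || reservation_object_test_signaled_rcu(resv, true);
	}
]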
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index ea1f8d1bd228..b144c3f5c650 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -154,7 +154,7 @@ none:
154 if (ret) 154 if (ret)
155 return ret; 155 return ret;
156 156
157 i915_gem_retire_requests(dev); 157 i915_gem_retire_requests(to_i915(dev));
158 goto search_again; 158 goto search_again;
159 } 159 }
160 160
@@ -265,7 +265,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
265 if (ret) 265 if (ret)
266 return ret; 266 return ret;
267 267
268 i915_gem_retire_requests(vm->dev); 268 i915_gem_retire_requests(to_i915(vm->dev));
269 269
270 WARN_ON(!list_empty(&vm->active_list)); 270 WARN_ON(!list_empty(&vm->active_list));
271 } 271 }
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 33df74d98269..7941f1fe9cd2 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -714,7 +714,7 @@ eb_vma_misplaced(struct i915_vma *vma)
714static int 714static int
715i915_gem_execbuffer_reserve(struct intel_engine_cs *engine, 715i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
716 struct list_head *vmas, 716 struct list_head *vmas,
717 struct intel_context *ctx, 717 struct i915_gem_context *ctx,
718 bool *need_relocs) 718 bool *need_relocs)
719{ 719{
720 struct drm_i915_gem_object *obj; 720 struct drm_i915_gem_object *obj;
@@ -722,7 +722,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
722 struct i915_address_space *vm; 722 struct i915_address_space *vm;
723 struct list_head ordered_vmas; 723 struct list_head ordered_vmas;
724 struct list_head pinned_vmas; 724 struct list_head pinned_vmas;
725 bool has_fenced_gpu_access = INTEL_INFO(engine->dev)->gen < 4; 725 bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
726 int retry; 726 int retry;
727 727
728 i915_gem_retire_requests_ring(engine); 728 i915_gem_retire_requests_ring(engine);
@@ -826,7 +826,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
826 struct intel_engine_cs *engine, 826 struct intel_engine_cs *engine,
827 struct eb_vmas *eb, 827 struct eb_vmas *eb,
828 struct drm_i915_gem_exec_object2 *exec, 828 struct drm_i915_gem_exec_object2 *exec,
829 struct intel_context *ctx) 829 struct i915_gem_context *ctx)
830{ 830{
831 struct drm_i915_gem_relocation_entry *reloc; 831 struct drm_i915_gem_relocation_entry *reloc;
832 struct i915_address_space *vm; 832 struct i915_address_space *vm;
@@ -963,7 +963,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
963 } 963 }
964 964
965 if (flush_chipset) 965 if (flush_chipset)
966 i915_gem_chipset_flush(req->engine->dev); 966 i915_gem_chipset_flush(req->engine->i915);
967 967
968 if (flush_domains & I915_GEM_DOMAIN_GTT) 968 if (flush_domains & I915_GEM_DOMAIN_GTT)
969 wmb(); 969 wmb();
@@ -1063,17 +1063,17 @@ validate_exec_list(struct drm_device *dev,
1063 return 0; 1063 return 0;
1064} 1064}
1065 1065
1066static struct intel_context * 1066static struct i915_gem_context *
1067i915_gem_validate_context(struct drm_device *dev, struct drm_file *file, 1067i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
1068 struct intel_engine_cs *engine, const u32 ctx_id) 1068 struct intel_engine_cs *engine, const u32 ctx_id)
1069{ 1069{
1070 struct intel_context *ctx = NULL; 1070 struct i915_gem_context *ctx = NULL;
1071 struct i915_ctx_hang_stats *hs; 1071 struct i915_ctx_hang_stats *hs;
1072 1072
1073 if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE) 1073 if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
1074 return ERR_PTR(-EINVAL); 1074 return ERR_PTR(-EINVAL);
1075 1075
1076 ctx = i915_gem_context_get(file->driver_priv, ctx_id); 1076 ctx = i915_gem_context_lookup(file->driver_priv, ctx_id);
1077 if (IS_ERR(ctx)) 1077 if (IS_ERR(ctx))
1078 return ctx; 1078 return ctx;
1079 1079
@@ -1083,14 +1083,6 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
1083 return ERR_PTR(-EIO); 1083 return ERR_PTR(-EIO);
1084 } 1084 }
1085 1085
1086 if (i915.enable_execlists && !ctx->engine[engine->id].state) {
1087 int ret = intel_lr_context_deferred_alloc(ctx, engine);
1088 if (ret) {
1089 DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
1090 return ERR_PTR(ret);
1091 }
1092 }
1093
1094 return ctx; 1086 return ctx;
1095} 1087}
1096 1088
@@ -1125,7 +1117,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
1125 if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) { 1117 if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
1126 i915_gem_request_assign(&obj->last_fenced_req, req); 1118 i915_gem_request_assign(&obj->last_fenced_req, req);
1127 if (entry->flags & __EXEC_OBJECT_HAS_FENCE) { 1119 if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
1128 struct drm_i915_private *dev_priv = to_i915(engine->dev); 1120 struct drm_i915_private *dev_priv = engine->i915;
1129 list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list, 1121 list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
1130 &dev_priv->mm.fence_list); 1122 &dev_priv->mm.fence_list);
1131 } 1123 }
@@ -1436,7 +1428,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1436 struct drm_i915_gem_object *batch_obj; 1428 struct drm_i915_gem_object *batch_obj;
1437 struct drm_i915_gem_exec_object2 shadow_exec_entry; 1429 struct drm_i915_gem_exec_object2 shadow_exec_entry;
1438 struct intel_engine_cs *engine; 1430 struct intel_engine_cs *engine;
1439 struct intel_context *ctx; 1431 struct i915_gem_context *ctx;
1440 struct i915_address_space *vm; 1432 struct i915_address_space *vm;
1441 struct i915_execbuffer_params params_master; /* XXX: will be removed later */ 1433 struct i915_execbuffer_params params_master; /* XXX: will be removed later */
1442 struct i915_execbuffer_params *params = &params_master; 1434 struct i915_execbuffer_params *params = &params_master;
@@ -1454,7 +1446,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1454 1446
1455 dispatch_flags = 0; 1447 dispatch_flags = 0;
1456 if (args->flags & I915_EXEC_SECURE) { 1448 if (args->flags & I915_EXEC_SECURE) {
1457 if (!file->is_master || !capable(CAP_SYS_ADMIN)) 1449 if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
1458 return -EPERM; 1450 return -EPERM;
1459 1451
1460 dispatch_flags |= I915_DISPATCH_SECURE; 1452 dispatch_flags |= I915_DISPATCH_SECURE;
@@ -1561,7 +1553,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1561 batch_obj, 1553 batch_obj,
1562 args->batch_start_offset, 1554 args->batch_start_offset,
1563 args->batch_len, 1555 args->batch_len,
1564 file->is_master); 1556 drm_is_current_master(file));
1565 if (IS_ERR(parsed_batch_obj)) { 1557 if (IS_ERR(parsed_batch_obj)) {
1566 ret = PTR_ERR(parsed_batch_obj); 1558 ret = PTR_ERR(parsed_batch_obj);
1567 goto err; 1559 goto err;
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index a2b938ec01a7..2b6bdc267fb5 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -745,15 +745,15 @@ i915_gem_swizzle_page(struct page *page)
745void 745void
746i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) 746i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
747{ 747{
748 struct sg_page_iter sg_iter; 748 struct sgt_iter sgt_iter;
749 struct page *page;
749 int i; 750 int i;
750 751
751 if (obj->bit_17 == NULL) 752 if (obj->bit_17 == NULL)
752 return; 753 return;
753 754
754 i = 0; 755 i = 0;
755 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { 756 for_each_sgt_page(page, sgt_iter, obj->pages) {
756 struct page *page = sg_page_iter_page(&sg_iter);
757 char new_bit_17 = page_to_phys(page) >> 17; 757 char new_bit_17 = page_to_phys(page) >> 17;
758 if ((new_bit_17 & 0x1) != 758 if ((new_bit_17 & 0x1) !=
759 (test_bit(i, obj->bit_17) != 0)) { 759 (test_bit(i, obj->bit_17) != 0)) {
@@ -775,7 +775,8 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
775void 775void
776i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) 776i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
777{ 777{
778 struct sg_page_iter sg_iter; 778 struct sgt_iter sgt_iter;
779 struct page *page;
779 int page_count = obj->base.size >> PAGE_SHIFT; 780 int page_count = obj->base.size >> PAGE_SHIFT;
780 int i; 781 int i;
781 782
@@ -790,8 +791,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
790 } 791 }
791 792
792 i = 0; 793 i = 0;
793 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { 794
794 if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17)) 795 for_each_sgt_page(page, sgt_iter, obj->pages) {
796 if (page_to_phys(page) & (1 << 17))
795 __set_bit(i, obj->bit_17); 797 __set_bit(i, obj->bit_17);
796 else 798 else
797 __clear_bit(i, obj->bit_17); 799 __clear_bit(i, obj->bit_17);
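[Editor's note: both hunks above are mechanical conversions to the sgt_iter helpers introduced earlier in this series; for_each_sgt_page() walks struct page pointers, while for_each_sgt_dma() (used in the GTT code below) walks DMA addresses. The idiom, as a sketch:

	/* Sketch: count the backing pages of a GEM object with the iterator. */
	static unsigned long count_backing_pages(struct drm_i915_gem_object *obj)
	{
		struct sgt_iter sgt_iter;
		struct page *page;
		unsigned long n = 0;

		for_each_sgt_page(page, sgt_iter, obj->pages)
			n++;

		return n;
	}
]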
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 92acdff9dad3..5890017b9832 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -93,6 +93,13 @@
93 * 93 *
94 */ 94 */
95 95
96static inline struct i915_ggtt *
97i915_vm_to_ggtt(struct i915_address_space *vm)
98{
99 GEM_BUG_ON(!i915_is_ggtt(vm));
100 return container_of(vm, struct i915_ggtt, base);
101}
102
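[Editor's note: i915_vm_to_ggtt() is the usual container_of() downcast, made safe by asserting the address space really is the global GTT. The round trip, as a sketch:

	/* Sketch: checked downcast and its trivial inverse. */
	static void vm_to_ggtt_roundtrip(struct i915_address_space *vm)
	{
		struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); /* GEM_BUG_ON if not GGTT */

		WARN_ON(&ggtt->base != vm); /* upcast is the embedded base member */
	}
]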
96static int 103static int
97i915_get_ggtt_vma_pages(struct i915_vma *vma); 104i915_get_ggtt_vma_pages(struct i915_vma *vma);
98 105
@@ -103,25 +110,29 @@ const struct i915_ggtt_view i915_ggtt_view_rotated = {
103 .type = I915_GGTT_VIEW_ROTATED, 110 .type = I915_GGTT_VIEW_ROTATED,
104}; 111};
105 112
106static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) 113int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
114 int enable_ppgtt)
107{ 115{
108 bool has_aliasing_ppgtt; 116 bool has_aliasing_ppgtt;
109 bool has_full_ppgtt; 117 bool has_full_ppgtt;
110 bool has_full_48bit_ppgtt; 118 bool has_full_48bit_ppgtt;
111 119
112 has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6; 120 has_aliasing_ppgtt = INTEL_GEN(dev_priv) >= 6;
113 has_full_ppgtt = INTEL_INFO(dev)->gen >= 7; 121 has_full_ppgtt = INTEL_GEN(dev_priv) >= 7;
114 has_full_48bit_ppgtt = IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9; 122 has_full_48bit_ppgtt =
123 IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9;
115 124
116 if (intel_vgpu_active(dev)) 125 if (intel_vgpu_active(dev_priv))
117 has_full_ppgtt = false; /* emulation is too hard */ 126 has_full_ppgtt = false; /* emulation is too hard */
118 127
128 if (!has_aliasing_ppgtt)
129 return 0;
130
119 /* 131 /*
120 * We don't allow disabling PPGTT for gen9+ as it's a requirement for 132 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
121 * execlists, the sole mechanism available to submit work. 133 * execlists, the sole mechanism available to submit work.
122 */ 134 */
123 if (INTEL_INFO(dev)->gen < 9 && 135 if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
124 (enable_ppgtt == 0 || !has_aliasing_ppgtt))
125 return 0; 136 return 0;
126 137
127 if (enable_ppgtt == 1) 138 if (enable_ppgtt == 1)
@@ -135,19 +146,19 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
135 146
136#ifdef CONFIG_INTEL_IOMMU 147#ifdef CONFIG_INTEL_IOMMU
137 /* Disable ppgtt on SNB if VT-d is on. */ 148 /* Disable ppgtt on SNB if VT-d is on. */
138 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) { 149 if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped) {
139 DRM_INFO("Disabling PPGTT because VT-d is on\n"); 150 DRM_INFO("Disabling PPGTT because VT-d is on\n");
140 return 0; 151 return 0;
141 } 152 }
142#endif 153#endif
143 154
144 /* Early VLV doesn't have this */ 155 /* Early VLV doesn't have this */
145 if (IS_VALLEYVIEW(dev) && dev->pdev->revision < 0xb) { 156 if (IS_VALLEYVIEW(dev_priv) && dev_priv->dev->pdev->revision < 0xb) {
146 DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n"); 157 DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
147 return 0; 158 return 0;
148 } 159 }
149 160
150 if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists) 161 if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists)
151 return has_full_48bit_ppgtt ? 3 : 2; 162 return has_full_48bit_ppgtt ? 3 : 2;
152 else 163 else
153 return has_aliasing_ppgtt ? 1 : 0; 164 return has_aliasing_ppgtt ? 1 : 0;
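[Editor's note: the value intel_sanitize_enable_ppgtt() returns, and that i915.enable_ppgtt is clamped to, is an enum in disguise. A sketch of the encoding; the names are this editor's, the driver itself uses bare integers:

	/* Sketch: meaning of the sanitized i915.enable_ppgtt value. */
	enum {
		PPGTT_NONE       = 0,	/* global GTT only */
		PPGTT_ALIASING   = 1,	/* single ppgtt aliasing the GGTT */
		PPGTT_FULL       = 2,	/* per-context 32-bit ppgtt */
		PPGTT_FULL_48BIT = 3,	/* per-context 48-bit ppgtt (BDW, gen9+) */
	};
]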
@@ -866,6 +877,7 @@ static void gen8_free_page_tables(struct drm_device *dev,
866static int gen8_init_scratch(struct i915_address_space *vm) 877static int gen8_init_scratch(struct i915_address_space *vm)
867{ 878{
868 struct drm_device *dev = vm->dev; 879 struct drm_device *dev = vm->dev;
880 int ret;
869 881
870 vm->scratch_page = alloc_scratch_page(dev); 882 vm->scratch_page = alloc_scratch_page(dev);
871 if (IS_ERR(vm->scratch_page)) 883 if (IS_ERR(vm->scratch_page))
@@ -873,24 +885,21 @@ static int gen8_init_scratch(struct i915_address_space *vm)
873 885
874 vm->scratch_pt = alloc_pt(dev); 886 vm->scratch_pt = alloc_pt(dev);
875 if (IS_ERR(vm->scratch_pt)) { 887 if (IS_ERR(vm->scratch_pt)) {
876 free_scratch_page(dev, vm->scratch_page); 888 ret = PTR_ERR(vm->scratch_pt);
877 return PTR_ERR(vm->scratch_pt); 889 goto free_scratch_page;
878 } 890 }
879 891
880 vm->scratch_pd = alloc_pd(dev); 892 vm->scratch_pd = alloc_pd(dev);
881 if (IS_ERR(vm->scratch_pd)) { 893 if (IS_ERR(vm->scratch_pd)) {
882 free_pt(dev, vm->scratch_pt); 894 ret = PTR_ERR(vm->scratch_pd);
883 free_scratch_page(dev, vm->scratch_page); 895 goto free_pt;
884 return PTR_ERR(vm->scratch_pd);
885 } 896 }
886 897
887 if (USES_FULL_48BIT_PPGTT(dev)) { 898 if (USES_FULL_48BIT_PPGTT(dev)) {
888 vm->scratch_pdp = alloc_pdp(dev); 899 vm->scratch_pdp = alloc_pdp(dev);
889 if (IS_ERR(vm->scratch_pdp)) { 900 if (IS_ERR(vm->scratch_pdp)) {
890 free_pd(dev, vm->scratch_pd); 901 ret = PTR_ERR(vm->scratch_pdp);
891 free_pt(dev, vm->scratch_pt); 902 goto free_pd;
892 free_scratch_page(dev, vm->scratch_page);
893 return PTR_ERR(vm->scratch_pdp);
894 } 903 }
895 } 904 }
896 905
@@ -900,6 +909,15 @@ static int gen8_init_scratch(struct i915_address_space *vm)
900 gen8_initialize_pdp(vm, vm->scratch_pdp); 909 gen8_initialize_pdp(vm, vm->scratch_pdp);
901 910
902 return 0; 911 return 0;
912
913free_pd:
914 free_pd(dev, vm->scratch_pd);
915free_pt:
916 free_pt(dev, vm->scratch_pt);
917free_scratch_page:
918 free_scratch_page(dev, vm->scratch_page);
919
920 return ret;
903} 921}
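[Editor's note: the rewrite above swaps nested manual cleanup for the kernel's standard reverse-order goto unwind. The general shape, as a sketch with hypothetical acquire()/release() helpers:

	/* Sketch: acquire in order, release in exact reverse order on error. */
	static int init_three(struct foo *a, struct foo *b, struct foo *c)
	{
		int ret;

		ret = acquire(a);
		if (ret)
			return ret;

		ret = acquire(b);
		if (ret)
			goto err_a;

		ret = acquire(c);
		if (ret)
			goto err_b;

		return 0;

	err_b:
		release(b);
	err_a:
		release(a);
		return ret;
	}
]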
904 922
905static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create) 923static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
@@ -978,7 +996,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
978{ 996{
979 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 997 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
980 998
981 if (intel_vgpu_active(vm->dev)) 999 if (intel_vgpu_active(to_i915(vm->dev)))
982 gen8_ppgtt_notify_vgt(ppgtt, false); 1000 gen8_ppgtt_notify_vgt(ppgtt, false);
983 1001
984 if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) 1002 if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
@@ -1529,14 +1547,14 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
1529 0, 0, 1547 0, 0,
1530 GEN8_PML4E_SHIFT); 1548 GEN8_PML4E_SHIFT);
1531 1549
1532 if (intel_vgpu_active(ppgtt->base.dev)) { 1550 if (intel_vgpu_active(to_i915(ppgtt->base.dev))) {
1533 ret = gen8_preallocate_top_level_pdps(ppgtt); 1551 ret = gen8_preallocate_top_level_pdps(ppgtt);
1534 if (ret) 1552 if (ret)
1535 goto free_scratch; 1553 goto free_scratch;
1536 } 1554 }
1537 } 1555 }
1538 1556
1539 if (intel_vgpu_active(ppgtt->base.dev)) 1557 if (intel_vgpu_active(to_i915(ppgtt->base.dev)))
1540 gen8_ppgtt_notify_vgt(ppgtt, true); 1558 gen8_ppgtt_notify_vgt(ppgtt, true);
1541 1559
1542 return 0; 1560 return 0;
@@ -1821,20 +1839,19 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
1821 enum i915_cache_level cache_level, u32 flags) 1839 enum i915_cache_level cache_level, u32 flags)
1822{ 1840{
1823 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1841 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1824 gen6_pte_t *pt_vaddr;
1825 unsigned first_entry = start >> PAGE_SHIFT; 1842 unsigned first_entry = start >> PAGE_SHIFT;
1826 unsigned act_pt = first_entry / GEN6_PTES; 1843 unsigned act_pt = first_entry / GEN6_PTES;
1827 unsigned act_pte = first_entry % GEN6_PTES; 1844 unsigned act_pte = first_entry % GEN6_PTES;
1828 struct sg_page_iter sg_iter; 1845 gen6_pte_t *pt_vaddr = NULL;
1846 struct sgt_iter sgt_iter;
1847 dma_addr_t addr;
1829 1848
1830 pt_vaddr = NULL; 1849 for_each_sgt_dma(addr, sgt_iter, pages) {
1831 for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
1832 if (pt_vaddr == NULL) 1850 if (pt_vaddr == NULL)
1833 pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]); 1851 pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
1834 1852
1835 pt_vaddr[act_pte] = 1853 pt_vaddr[act_pte] =
1836 vm->pte_encode(sg_page_iter_dma_address(&sg_iter), 1854 vm->pte_encode(addr, cache_level, true, flags);
1837 cache_level, true, flags);
1838 1855
1839 if (++act_pte == GEN6_PTES) { 1856 if (++act_pte == GEN6_PTES) {
1840 kunmap_px(ppgtt, pt_vaddr); 1857 kunmap_px(ppgtt, pt_vaddr);
@@ -1843,6 +1860,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
1843 act_pte = 0; 1860 act_pte = 0;
1844 } 1861 }
1845 } 1862 }
1863
1846 if (pt_vaddr) 1864 if (pt_vaddr)
1847 kunmap_px(ppgtt, pt_vaddr); 1865 kunmap_px(ppgtt, pt_vaddr);
1848} 1866}
@@ -2064,7 +2082,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
2064 } else 2082 } else
2065 BUG(); 2083 BUG();
2066 2084
2067 if (intel_vgpu_active(dev)) 2085 if (intel_vgpu_active(dev_priv))
2068 ppgtt->switch_mm = vgpu_mm_switch; 2086 ppgtt->switch_mm = vgpu_mm_switch;
2069 2087
2070 ret = gen6_ppgtt_alloc(ppgtt); 2088 ret = gen6_ppgtt_alloc(ppgtt);
@@ -2140,7 +2158,7 @@ static void gtt_write_workarounds(struct drm_device *dev)
2140 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT); 2158 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
2141} 2159}
2142 2160
2143int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) 2161static int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
2144{ 2162{
2145 struct drm_i915_private *dev_priv = dev->dev_private; 2163 struct drm_i915_private *dev_priv = dev->dev_private;
2146 int ret = 0; 2164 int ret = 0;
@@ -2179,20 +2197,6 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
2179 return 0; 2197 return 0;
2180} 2198}
2181 2199
2182int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
2183{
2184 struct drm_i915_private *dev_priv = req->i915;
2185 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2186
2187 if (i915.enable_execlists)
2188 return 0;
2189
2190 if (!ppgtt)
2191 return 0;
2192
2193 return ppgtt->switch_mm(ppgtt, req);
2194}
2195
2196struct i915_hw_ppgtt * 2200struct i915_hw_ppgtt *
2197i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv) 2201i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
2198{ 2202{
@@ -2275,12 +2279,11 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
2275 dev_priv->mm.interruptible = interruptible; 2279 dev_priv->mm.interruptible = interruptible;
2276} 2280}
2277 2281
2278void i915_check_and_clear_faults(struct drm_device *dev) 2282void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
2279{ 2283{
2280 struct drm_i915_private *dev_priv = dev->dev_private;
2281 struct intel_engine_cs *engine; 2284 struct intel_engine_cs *engine;
2282 2285
2283 if (INTEL_INFO(dev)->gen < 6) 2286 if (INTEL_INFO(dev_priv)->gen < 6)
2284 return; 2287 return;
2285 2288
2286 for_each_engine(engine, dev_priv) { 2289 for_each_engine(engine, dev_priv) {
@@ -2324,7 +2327,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
2324 if (INTEL_INFO(dev)->gen < 6) 2327 if (INTEL_INFO(dev)->gen < 6)
2325 return; 2328 return;
2326 2329
2327 i915_check_and_clear_faults(dev); 2330 i915_check_and_clear_faults(dev_priv);
2328 2331
2329 ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total, 2332 ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
2330 true); 2333 true);
@@ -2352,29 +2355,49 @@ static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
2352#endif 2355#endif
2353} 2356}
2354 2357
2358static void gen8_ggtt_insert_page(struct i915_address_space *vm,
2359 dma_addr_t addr,
2360 uint64_t offset,
2361 enum i915_cache_level level,
2362 u32 unused)
2363{
2364 struct drm_i915_private *dev_priv = to_i915(vm->dev);
2365 gen8_pte_t __iomem *pte =
2366 (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
2367 (offset >> PAGE_SHIFT);
2368 int rpm_atomic_seq;
2369
2370 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
2371
2372 gen8_set_pte(pte, gen8_pte_encode(addr, level, true));
2373
2374 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
2375 POSTING_READ(GFX_FLSH_CNTL_GEN6);
2376
2377 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
2378}
2379
2355static void gen8_ggtt_insert_entries(struct i915_address_space *vm, 2380static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
2356 struct sg_table *st, 2381 struct sg_table *st,
2357 uint64_t start, 2382 uint64_t start,
2358 enum i915_cache_level level, u32 unused) 2383 enum i915_cache_level level, u32 unused)
2359{ 2384{
2360 struct drm_i915_private *dev_priv = to_i915(vm->dev); 2385 struct drm_i915_private *dev_priv = to_i915(vm->dev);
2361 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2386 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2362 unsigned first_entry = start >> PAGE_SHIFT; 2387 struct sgt_iter sgt_iter;
2363 gen8_pte_t __iomem *gtt_entries = 2388 gen8_pte_t __iomem *gtt_entries;
2364 (gen8_pte_t __iomem *)ggtt->gsm + first_entry; 2389 gen8_pte_t gtt_entry;
2365 int i = 0; 2390 dma_addr_t addr;
2366 struct sg_page_iter sg_iter;
2367 dma_addr_t addr = 0; /* shut up gcc */
2368 int rpm_atomic_seq; 2391 int rpm_atomic_seq;
2392 int i = 0;
2369 2393
2370 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); 2394 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
2371 2395
2372 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { 2396 gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
2373 addr = sg_dma_address(sg_iter.sg) + 2397
2374 (sg_iter.sg_pgoffset << PAGE_SHIFT); 2398 for_each_sgt_dma(addr, sgt_iter, st) {
2375 gen8_set_pte(&gtt_entries[i], 2399 gtt_entry = gen8_pte_encode(addr, level, true);
2376 gen8_pte_encode(addr, level, true)); 2400 gen8_set_pte(&gtt_entries[i++], gtt_entry);
2377 i++;
2378 } 2401 }
2379 2402
2380 /* 2403 /*
@@ -2385,8 +2408,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
2385 * hardware should work, we must keep this posting read for paranoia. 2408 * hardware should work, we must keep this posting read for paranoia.
2386 */ 2409 */
2387 if (i != 0) 2410 if (i != 0)
2388 WARN_ON(readq(&gtt_entries[i-1]) 2411 WARN_ON(readq(&gtt_entries[i-1]) != gtt_entry);
2389 != gen8_pte_encode(addr, level, true));
2390 2412
2391 /* This next bit makes the above posting read even more important. We 2413 /* This next bit makes the above posting read even more important. We
2392 * want to flush the TLBs only after we're certain all the PTE updates 2414 * want to flush the TLBs only after we're certain all the PTE updates
@@ -2424,6 +2446,28 @@ static void gen8_ggtt_insert_entries__BKL(struct i915_address_space *vm,
2424 stop_machine(gen8_ggtt_insert_entries__cb, &arg, NULL); 2446 stop_machine(gen8_ggtt_insert_entries__cb, &arg, NULL);
2425} 2447}
2426 2448
2449static void gen6_ggtt_insert_page(struct i915_address_space *vm,
2450 dma_addr_t addr,
2451 uint64_t offset,
2452 enum i915_cache_level level,
2453 u32 flags)
2454{
2455 struct drm_i915_private *dev_priv = to_i915(vm->dev);
2456 gen6_pte_t __iomem *pte =
2457 (gen6_pte_t __iomem *)dev_priv->ggtt.gsm +
2458 (offset >> PAGE_SHIFT);
2459 int rpm_atomic_seq;
2460
2461 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
2462
2463 iowrite32(vm->pte_encode(addr, level, true, flags), pte);
2464
2465 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
2466 POSTING_READ(GFX_FLSH_CNTL_GEN6);
2467
2468 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
2469}
2470
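[Editor's note: the new insert_page hook looks intended for page-at-a-time GTT access paths that hold a DMA address but no sg_table. A hedged usage sketch; node is assumed to be a pre-reserved single-page drm_mm_node in the GGTT:

	/* Sketch: map one page at a reserved GGTT slot, use it, scrub it. */
	static void access_one_page(struct i915_ggtt *ggtt,
				    struct drm_mm_node *node,
				    dma_addr_t addr)
	{
		ggtt->base.insert_page(&ggtt->base, addr, node->start,
				       I915_CACHE_NONE, 0);
		/* ... readl()/writel() through the aperture at node->start ... */
		ggtt->base.clear_range(&ggtt->base, node->start, PAGE_SIZE, true);
	}
]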
2427/* 2471/*
2428 * Binds an object into the global gtt with the specified cache level. The object 2472 * Binds an object into the global gtt with the specified cache level. The object
2429 * will be accessible to the GPU via commands whose operands reference offsets 2473 * will be accessible to the GPU via commands whose operands reference offsets
@@ -2436,21 +2480,21 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
2436 enum i915_cache_level level, u32 flags) 2480 enum i915_cache_level level, u32 flags)
2437{ 2481{
2438 struct drm_i915_private *dev_priv = to_i915(vm->dev); 2482 struct drm_i915_private *dev_priv = to_i915(vm->dev);
2439 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2483 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2440 unsigned first_entry = start >> PAGE_SHIFT; 2484 struct sgt_iter sgt_iter;
2441 gen6_pte_t __iomem *gtt_entries = 2485 gen6_pte_t __iomem *gtt_entries;
2442 (gen6_pte_t __iomem *)ggtt->gsm + first_entry; 2486 gen6_pte_t gtt_entry;
2443 int i = 0; 2487 dma_addr_t addr;
2444 struct sg_page_iter sg_iter;
2445 dma_addr_t addr = 0;
2446 int rpm_atomic_seq; 2488 int rpm_atomic_seq;
2489 int i = 0;
2447 2490
2448 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); 2491 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
2449 2492
2450 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { 2493 gtt_entries = (gen6_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
2451 addr = sg_page_iter_dma_address(&sg_iter); 2494
2452 iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]); 2495 for_each_sgt_dma(addr, sgt_iter, st) {
2453 i++; 2496 gtt_entry = vm->pte_encode(addr, level, true, flags);
2497 iowrite32(gtt_entry, &gtt_entries[i++]);
2454 } 2498 }
2455 2499
2456 /* XXX: This serves as a posting read to make sure that the PTE has 2500 /* XXX: This serves as a posting read to make sure that the PTE has
@@ -2459,10 +2503,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
2459 * of NUMA access patterns. Therefore, even with the way we assume 2503 * of NUMA access patterns. Therefore, even with the way we assume
2460 * hardware should work, we must keep this posting read for paranoia. 2504 * hardware should work, we must keep this posting read for paranoia.
2461 */ 2505 */
2462 if (i != 0) { 2506 if (i != 0)
2463 unsigned long gtt = readl(&gtt_entries[i-1]); 2507 WARN_ON(readl(&gtt_entries[i-1]) != gtt_entry);
2464 WARN_ON(gtt != vm->pte_encode(addr, level, true, flags));
2465 }
2466 2508
2467 /* This next bit makes the above posting read even more important. We 2509 /* This next bit makes the above posting read even more important. We
2468 * want to flush the TLBs only after we're certain all the PTE updates 2510 * want to flush the TLBs only after we're certain all the PTE updates
@@ -2474,13 +2516,20 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
2474 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); 2516 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
2475} 2517}
2476 2518
2519static void nop_clear_range(struct i915_address_space *vm,
2520 uint64_t start,
2521 uint64_t length,
2522 bool use_scratch)
2523{
2524}
2525
2477static void gen8_ggtt_clear_range(struct i915_address_space *vm, 2526static void gen8_ggtt_clear_range(struct i915_address_space *vm,
2478 uint64_t start, 2527 uint64_t start,
2479 uint64_t length, 2528 uint64_t length,
2480 bool use_scratch) 2529 bool use_scratch)
2481{ 2530{
2482 struct drm_i915_private *dev_priv = to_i915(vm->dev); 2531 struct drm_i915_private *dev_priv = to_i915(vm->dev);
2483 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2532 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2484 unsigned first_entry = start >> PAGE_SHIFT; 2533 unsigned first_entry = start >> PAGE_SHIFT;
2485 unsigned num_entries = length >> PAGE_SHIFT; 2534 unsigned num_entries = length >> PAGE_SHIFT;
2486 gen8_pte_t scratch_pte, __iomem *gtt_base = 2535 gen8_pte_t scratch_pte, __iomem *gtt_base =
@@ -2512,7 +2561,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
2512 bool use_scratch) 2561 bool use_scratch)
2513{ 2562{
2514 struct drm_i915_private *dev_priv = to_i915(vm->dev); 2563 struct drm_i915_private *dev_priv = to_i915(vm->dev);
2515 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2564 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2516 unsigned first_entry = start >> PAGE_SHIFT; 2565 unsigned first_entry = start >> PAGE_SHIFT;
2517 unsigned num_entries = length >> PAGE_SHIFT; 2566 unsigned num_entries = length >> PAGE_SHIFT;
2518 gen6_pte_t scratch_pte, __iomem *gtt_base = 2567 gen6_pte_t scratch_pte, __iomem *gtt_base =
@@ -2538,6 +2587,24 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
2538 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); 2587 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
2539} 2588}
2540 2589
2590static void i915_ggtt_insert_page(struct i915_address_space *vm,
2591 dma_addr_t addr,
2592 uint64_t offset,
2593 enum i915_cache_level cache_level,
2594 u32 unused)
2595{
2596 struct drm_i915_private *dev_priv = to_i915(vm->dev);
2597 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2598 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2599 int rpm_atomic_seq;
2600
2601 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
2602
2603 intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
2604
2605 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
2606}
2607
2541static void i915_ggtt_insert_entries(struct i915_address_space *vm, 2608static void i915_ggtt_insert_entries(struct i915_address_space *vm,
2542 struct sg_table *pages, 2609 struct sg_table *pages,
2543 uint64_t start, 2610 uint64_t start,
@@ -2727,11 +2794,9 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
2727 i915_address_space_init(&ggtt->base, dev_priv); 2794 i915_address_space_init(&ggtt->base, dev_priv);
2728 ggtt->base.total += PAGE_SIZE; 2795 ggtt->base.total += PAGE_SIZE;
2729 2796
2730 if (intel_vgpu_active(dev)) { 2797 ret = intel_vgt_balloon(dev_priv);
2731 ret = intel_vgt_balloon(dev); 2798 if (ret)
2732 if (ret) 2799 return ret;
2733 return ret;
2734 }
2735 2800
2736 if (!HAS_LLC(dev)) 2801 if (!HAS_LLC(dev))
2737 ggtt->base.mm.color_adjust = i915_gtt_color_adjust; 2802 ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
@@ -2831,8 +2896,7 @@ void i915_ggtt_cleanup_hw(struct drm_device *dev)
2831 i915_gem_cleanup_stolen(dev); 2896 i915_gem_cleanup_stolen(dev);
2832 2897
2833 if (drm_mm_initialized(&ggtt->base.mm)) { 2898 if (drm_mm_initialized(&ggtt->base.mm)) {
2834 if (intel_vgpu_active(dev)) 2899 intel_vgt_deballoon(dev_priv);
2835 intel_vgt_deballoon();
2836 2900
2837 drm_mm_takedown(&ggtt->base.mm); 2901 drm_mm_takedown(&ggtt->base.mm);
2838 list_del(&ggtt->base.global_link); 2902 list_del(&ggtt->base.global_link);
@@ -3069,13 +3133,16 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
3069 3133
3070 ret = ggtt_probe_common(dev, ggtt->size); 3134 ret = ggtt_probe_common(dev, ggtt->size);
3071 3135
3072 ggtt->base.clear_range = gen8_ggtt_clear_range;
3073 if (IS_CHERRYVIEW(dev_priv))
3074 ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
3075 else
3076 ggtt->base.insert_entries = gen8_ggtt_insert_entries;
3077 ggtt->base.bind_vma = ggtt_bind_vma; 3136 ggtt->base.bind_vma = ggtt_bind_vma;
3078 ggtt->base.unbind_vma = ggtt_unbind_vma; 3137 ggtt->base.unbind_vma = ggtt_unbind_vma;
3138 ggtt->base.insert_page = gen8_ggtt_insert_page;
3139 ggtt->base.clear_range = nop_clear_range;
3140 if (!USES_FULL_PPGTT(dev_priv))
3141 ggtt->base.clear_range = gen8_ggtt_clear_range;
3142
3143 ggtt->base.insert_entries = gen8_ggtt_insert_entries;
3144 if (IS_CHERRYVIEW(dev_priv))
3145 ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
3079 3146
3080 return ret; 3147 return ret;
3081} 3148}
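[Editor's note: the rationale for nop_clear_range, as far as this editor can tell, is that with full PPGTT user batches never execute out of the GGTT, so stale GGTT PTEs are unreachable by untrusted work and scrubbing them on unbind is pure overhead. The selection above, restated:

	/* Sketch: equivalent form of the clear_range wiring in gen8_gmch_probe(). */
	if (USES_FULL_PPGTT(dev_priv))
		ggtt->base.clear_range = nop_clear_range;	/* leave PTEs stale */
	else
		ggtt->base.clear_range = gen8_ggtt_clear_range;	/* scrub on unbind */
]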
@@ -3108,6 +3175,7 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
3108 ret = ggtt_probe_common(dev, ggtt->size); 3175 ret = ggtt_probe_common(dev, ggtt->size);
3109 3176
3110 ggtt->base.clear_range = gen6_ggtt_clear_range; 3177 ggtt->base.clear_range = gen6_ggtt_clear_range;
3178 ggtt->base.insert_page = gen6_ggtt_insert_page;
3111 ggtt->base.insert_entries = gen6_ggtt_insert_entries; 3179 ggtt->base.insert_entries = gen6_ggtt_insert_entries;
3112 ggtt->base.bind_vma = ggtt_bind_vma; 3180 ggtt->base.bind_vma = ggtt_bind_vma;
3113 ggtt->base.unbind_vma = ggtt_unbind_vma; 3181 ggtt->base.unbind_vma = ggtt_unbind_vma;
@@ -3139,6 +3207,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
3139 &ggtt->mappable_base, &ggtt->mappable_end); 3207 &ggtt->mappable_base, &ggtt->mappable_end);
3140 3208
3141 ggtt->do_idle_maps = needs_idle_maps(dev_priv->dev); 3209 ggtt->do_idle_maps = needs_idle_maps(dev_priv->dev);
3210 ggtt->base.insert_page = i915_ggtt_insert_page;
3142 ggtt->base.insert_entries = i915_ggtt_insert_entries; 3211 ggtt->base.insert_entries = i915_ggtt_insert_entries;
3143 ggtt->base.clear_range = i915_ggtt_clear_range; 3212 ggtt->base.clear_range = i915_ggtt_clear_range;
3144 ggtt->base.bind_vma = ggtt_bind_vma; 3213 ggtt->base.bind_vma = ggtt_bind_vma;
@@ -3219,14 +3288,6 @@ int i915_ggtt_init_hw(struct drm_device *dev)
3219 if (intel_iommu_gfx_mapped) 3288 if (intel_iommu_gfx_mapped)
3220 DRM_INFO("VT-d active for gfx access\n"); 3289 DRM_INFO("VT-d active for gfx access\n");
3221#endif 3290#endif
3222 /*
3223 * i915.enable_ppgtt is read-only, so do an early pass to validate the
3224 * user's requested state against the hardware/driver capabilities. We
3225 * do this now so that we can print out any log messages once rather
3226 * than every time we check intel_enable_ppgtt().
3227 */
3228 i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
3229 DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
3230 3291
3231 return 0; 3292 return 0;
3232 3293
@@ -3250,9 +3311,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
3250 struct i915_ggtt *ggtt = &dev_priv->ggtt; 3311 struct i915_ggtt *ggtt = &dev_priv->ggtt;
3251 struct drm_i915_gem_object *obj; 3312 struct drm_i915_gem_object *obj;
3252 struct i915_vma *vma; 3313 struct i915_vma *vma;
3253 bool flush;
3254 3314
3255 i915_check_and_clear_faults(dev); 3315 i915_check_and_clear_faults(dev_priv);
3256 3316
3257 /* First fill our portion of the GTT with scratch pages */ 3317 /* First fill our portion of the GTT with scratch pages */
3258 ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total, 3318 ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
@@ -3260,19 +3320,16 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
3260 3320
3261 /* Cache flush objects bound into GGTT and rebind them. */ 3321 /* Cache flush objects bound into GGTT and rebind them. */
3262 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 3322 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
3263 flush = false;
3264 list_for_each_entry(vma, &obj->vma_list, obj_link) { 3323 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3265 if (vma->vm != &ggtt->base) 3324 if (vma->vm != &ggtt->base)
3266 continue; 3325 continue;
3267 3326
3268 WARN_ON(i915_vma_bind(vma, obj->cache_level, 3327 WARN_ON(i915_vma_bind(vma, obj->cache_level,
3269 PIN_UPDATE)); 3328 PIN_UPDATE));
3270
3271 flush = true;
3272 } 3329 }
3273 3330
3274 if (flush) 3331 if (obj->pin_display)
3275 i915_gem_clflush_object(obj, obj->pin_display); 3332 WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
3276 } 3333 }
3277 3334
3278 if (INTEL_INFO(dev)->gen >= 8) { 3335 if (INTEL_INFO(dev)->gen >= 8) {
@@ -3398,9 +3455,11 @@ static struct sg_table *
3398intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info, 3455intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
3399 struct drm_i915_gem_object *obj) 3456 struct drm_i915_gem_object *obj)
3400{ 3457{
3458 const size_t n_pages = obj->base.size / PAGE_SIZE;
3401 unsigned int size_pages = rot_info->plane[0].width * rot_info->plane[0].height; 3459 unsigned int size_pages = rot_info->plane[0].width * rot_info->plane[0].height;
3402 unsigned int size_pages_uv; 3460 unsigned int size_pages_uv;
3403 struct sg_page_iter sg_iter; 3461 struct sgt_iter sgt_iter;
3462 dma_addr_t dma_addr;
3404 unsigned long i; 3463 unsigned long i;
3405 dma_addr_t *page_addr_list; 3464 dma_addr_t *page_addr_list;
3406 struct sg_table *st; 3465 struct sg_table *st;
@@ -3409,7 +3468,7 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
3409 int ret = -ENOMEM; 3468 int ret = -ENOMEM;
3410 3469
3411 /* Allocate a temporary list of source pages for random access. */ 3470 /* Allocate a temporary list of source pages for random access. */
3412 page_addr_list = drm_malloc_gfp(obj->base.size / PAGE_SIZE, 3471 page_addr_list = drm_malloc_gfp(n_pages,
3413 sizeof(dma_addr_t), 3472 sizeof(dma_addr_t),
3414 GFP_TEMPORARY); 3473 GFP_TEMPORARY);
3415 if (!page_addr_list) 3474 if (!page_addr_list)
@@ -3432,11 +3491,10 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
3432 3491
3433 /* Populate source page list from the object. */ 3492 /* Populate source page list from the object. */
3434 i = 0; 3493 i = 0;
3435 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { 3494 for_each_sgt_dma(dma_addr, sgt_iter, obj->pages)
3436 page_addr_list[i] = sg_page_iter_dma_address(&sg_iter); 3495 page_addr_list[i++] = dma_addr;
3437 i++;
3438 }
3439 3496
3497 GEM_BUG_ON(i != n_pages);
3440 st->nents = 0; 3498 st->nents = 0;
3441 sg = st->sgl; 3499 sg = st->sgl;
3442 3500
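This is the first user of the driver's new lightweight sgt_iter walkers, which replace the heavier sg_page_iter. A minimal sketch of the two variants, assuming the macro semantics the diff implies (use_dma()/use_page() are hypothetical consumers; the page-based sibling appears again in the userptr hunk further down):

	struct sgt_iter iter;
	dma_addr_t dma;
	struct page *page;

	for_each_sgt_dma(dma, iter, obj->pages)		/* walk DMA addresses */
		use_dma(dma);				/* hypothetical consumer */

	for_each_sgt_page(page, iter, obj->pages)	/* walk struct pages */
		use_page(page);				/* hypothetical consumer */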
@@ -3634,3 +3692,29 @@ i915_ggtt_view_size(struct drm_i915_gem_object *obj,
3634 return obj->base.size; 3692 return obj->base.size;
3635 } 3693 }
3636} 3694}
3695
3696void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
3697{
3698 void __iomem *ptr;
3699
3700 lockdep_assert_held(&vma->vm->dev->struct_mutex);
3701 if (WARN_ON(!vma->obj->map_and_fenceable))
3702 return ERR_PTR(-ENODEV);
3703
3704 GEM_BUG_ON(!vma->is_ggtt);
3705 GEM_BUG_ON((vma->bound & GLOBAL_BIND) == 0);
3706
3707 ptr = vma->iomap;
3708 if (ptr == NULL) {
3709 ptr = io_mapping_map_wc(i915_vm_to_ggtt(vma->vm)->mappable,
3710 vma->node.start,
3711 vma->node.size);
3712 if (ptr == NULL)
3713 return ERR_PTR(-ENOMEM);
3714
3715 vma->iomap = ptr;
3716 }
3717
3718 vma->pin_count++;
3719 return ptr;
3720}
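For reference, a hypothetical caller of the new helper might look like the sketch below; the ERR_PTR handling and struct_mutex requirement follow the kerneldoc added in the header later in this diff, while the register offset and value are illustrative only:

	void __iomem *ptr;

	/* struct_mutex must already be held here */
	ptr = i915_vma_pin_iomap(vma);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	writel(0xdeadbeef, ptr + 0x40);	/* illustrative WC write via the aperture */

	i915_vma_unpin_iomap(vma);	/* drop the extra pin taken above */

Note that the mapping itself is cached in vma->iomap, so repeated pin/unpin cycles only pay the io_mapping_map_wc() cost once.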
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 0008543d55f6..163b564fb87d 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -34,6 +34,8 @@
34#ifndef __I915_GEM_GTT_H__ 34#ifndef __I915_GEM_GTT_H__
35#define __I915_GEM_GTT_H__ 35#define __I915_GEM_GTT_H__
36 36
37#include <linux/io-mapping.h>
38
37struct drm_i915_file_private; 39struct drm_i915_file_private;
38 40
39typedef uint32_t gen6_pte_t; 41typedef uint32_t gen6_pte_t;
@@ -175,6 +177,7 @@ struct i915_vma {
175 struct drm_mm_node node; 177 struct drm_mm_node node;
176 struct drm_i915_gem_object *obj; 178 struct drm_i915_gem_object *obj;
177 struct i915_address_space *vm; 179 struct i915_address_space *vm;
180 void __iomem *iomap;
178 181
179 /** Flags and address space this VMA is bound to */ 182 /** Flags and address space this VMA is bound to */
180#define GLOBAL_BIND (1<<0) 183#define GLOBAL_BIND (1<<0)
@@ -316,6 +319,11 @@ struct i915_address_space {
316 uint64_t start, 319 uint64_t start,
317 uint64_t length, 320 uint64_t length,
318 bool use_scratch); 321 bool use_scratch);
322 void (*insert_page)(struct i915_address_space *vm,
323 dma_addr_t addr,
324 uint64_t offset,
325 enum i915_cache_level cache_level,
326 u32 flags);
319 void (*insert_entries)(struct i915_address_space *vm, 327 void (*insert_entries)(struct i915_address_space *vm,
320 struct sg_table *st, 328 struct sg_table *st,
321 uint64_t start, 329 uint64_t start,
@@ -518,9 +526,7 @@ int i915_ggtt_enable_hw(struct drm_device *dev);
518void i915_gem_init_ggtt(struct drm_device *dev); 526void i915_gem_init_ggtt(struct drm_device *dev);
519void i915_ggtt_cleanup_hw(struct drm_device *dev); 527void i915_ggtt_cleanup_hw(struct drm_device *dev);
520 528
521int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
522int i915_ppgtt_init_hw(struct drm_device *dev); 529int i915_ppgtt_init_hw(struct drm_device *dev);
523int i915_ppgtt_init_ring(struct drm_i915_gem_request *req);
524void i915_ppgtt_release(struct kref *kref); 530void i915_ppgtt_release(struct kref *kref);
525struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev, 531struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
526 struct drm_i915_file_private *fpriv); 532 struct drm_i915_file_private *fpriv);
@@ -535,7 +541,7 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
535 kref_put(&ppgtt->ref, i915_ppgtt_release); 541 kref_put(&ppgtt->ref, i915_ppgtt_release);
536} 542}
537 543
538void i915_check_and_clear_faults(struct drm_device *dev); 544void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
539void i915_gem_suspend_gtt_mappings(struct drm_device *dev); 545void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
540void i915_gem_restore_gtt_mappings(struct drm_device *dev); 546void i915_gem_restore_gtt_mappings(struct drm_device *dev);
541 547
@@ -560,4 +566,36 @@ size_t
560i915_ggtt_view_size(struct drm_i915_gem_object *obj, 566i915_ggtt_view_size(struct drm_i915_gem_object *obj,
561 const struct i915_ggtt_view *view); 567 const struct i915_ggtt_view *view);
562 568
569/**
 570 * i915_vma_pin_iomap - map the GGTT VMA via the aperture with io_mapping_map_wc()
571 * @vma: VMA to iomap
572 *
573 * The passed in VMA has to be pinned in the global GTT mappable region.
 574 * An extra pinning of the VMA is acquired for the returned iomapping;
 575 * the caller must call i915_vma_unpin_iomap to relinquish the pinning
576 * after the iomapping is no longer required.
577 *
578 * Callers must hold the struct_mutex.
579 *
580 * Returns a valid iomapped pointer or ERR_PTR.
581 */
582void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
583
584/**
 585 * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_pin_iomap()
586 * @vma: VMA to unpin
587 *
 588 * Unpins the mapping previously obtained with i915_vma_pin_iomap().
589 *
590 * Callers must hold the struct_mutex. This function is only valid to be
591 * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
592 */
593static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
594{
595 lockdep_assert_held(&vma->vm->dev->struct_mutex);
596 GEM_BUG_ON(vma->pin_count == 0);
597 GEM_BUG_ON(vma->iomap == NULL);
598 vma->pin_count--;
599}
600
563#endif 601#endif
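The header also gains a per-page insert_page() hook alongside the existing insert_entries(), letting callers bind a single DMA page without building an sg_table. A sketch of an invocation through the vfunc (signature per the diff; the call site itself is hypothetical):

	/* bind one page at a fixed GTT offset, bypassing sg_table setup */
	vm->insert_page(vm, dma_addr, offset, I915_CACHE_NONE, 0);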
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 71611bf21fca..b7c1b5fb61ea 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -29,7 +29,7 @@
29#include "intel_renderstate.h" 29#include "intel_renderstate.h"
30 30
31static const struct intel_renderstate_rodata * 31static const struct intel_renderstate_rodata *
32render_state_get_rodata(struct drm_device *dev, const int gen) 32render_state_get_rodata(const int gen)
33{ 33{
34 switch (gen) { 34 switch (gen) {
35 case 6: 35 case 6:
@@ -45,21 +45,22 @@ render_state_get_rodata(struct drm_device *dev, const int gen)
45 return NULL; 45 return NULL;
46} 46}
47 47
48static int render_state_init(struct render_state *so, struct drm_device *dev) 48static int render_state_init(struct render_state *so,
49 struct drm_i915_private *dev_priv)
49{ 50{
50 int ret; 51 int ret;
51 52
52 so->gen = INTEL_INFO(dev)->gen; 53 so->gen = INTEL_GEN(dev_priv);
53 so->rodata = render_state_get_rodata(dev, so->gen); 54 so->rodata = render_state_get_rodata(so->gen);
54 if (so->rodata == NULL) 55 if (so->rodata == NULL)
55 return 0; 56 return 0;
56 57
57 if (so->rodata->batch_items * 4 > 4096) 58 if (so->rodata->batch_items * 4 > 4096)
58 return -EINVAL; 59 return -EINVAL;
59 60
60 so->obj = i915_gem_alloc_object(dev, 4096); 61 so->obj = i915_gem_object_create(dev_priv->dev, 4096);
61 if (so->obj == NULL) 62 if (IS_ERR(so->obj))
62 return -ENOMEM; 63 return PTR_ERR(so->obj);
63 64
64 ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0); 65 ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
65 if (ret) 66 if (ret)
@@ -93,6 +94,7 @@ free_gem:
93 94
94static int render_state_setup(struct render_state *so) 95static int render_state_setup(struct render_state *so)
95{ 96{
97 struct drm_device *dev = so->obj->base.dev;
96 const struct intel_renderstate_rodata *rodata = so->rodata; 98 const struct intel_renderstate_rodata *rodata = so->rodata;
97 unsigned int i = 0, reloc_index = 0; 99 unsigned int i = 0, reloc_index = 0;
98 struct page *page; 100 struct page *page;
@@ -134,6 +136,33 @@ static int render_state_setup(struct render_state *so)
134 136
135 so->aux_batch_offset = i * sizeof(u32); 137 so->aux_batch_offset = i * sizeof(u32);
136 138
139 if (HAS_POOLED_EU(dev)) {
140 /*
141 * We always program 3x6 pool config but depending upon which
142 * subslice is disabled HW drops down to appropriate config
143 * shown below.
144 *
145 * In the below table 2x6 config always refers to
 146 * fused-down version; native 2x6 is not available and can
147 * be ignored
148 *
149 * SNo subslices config eu pool configuration
150 * -----------------------------------------------------------
151 * 1 3 subslices enabled (3x6) - 0x00777000 (9+9)
152 * 2 ss0 disabled (2x6) - 0x00777000 (3+9)
153 * 3 ss1 disabled (2x6) - 0x00770000 (6+6)
154 * 4 ss2 disabled (2x6) - 0x00007000 (9+3)
155 */
156 u32 eu_pool_config = 0x00777000;
157
158 OUT_BATCH(d, i, GEN9_MEDIA_POOL_STATE);
159 OUT_BATCH(d, i, GEN9_MEDIA_POOL_ENABLE);
160 OUT_BATCH(d, i, eu_pool_config);
161 OUT_BATCH(d, i, 0);
162 OUT_BATCH(d, i, 0);
163 OUT_BATCH(d, i, 0);
164 }
165
137 OUT_BATCH(d, i, MI_BATCH_BUFFER_END); 166 OUT_BATCH(d, i, MI_BATCH_BUFFER_END);
138 so->aux_batch_size = (i * sizeof(u32)) - so->aux_batch_offset; 167 so->aux_batch_size = (i * sizeof(u32)) - so->aux_batch_offset;
139 168
@@ -177,7 +206,7 @@ int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
177 if (WARN_ON(engine->id != RCS)) 206 if (WARN_ON(engine->id != RCS))
178 return -ENOENT; 207 return -ENOENT;
179 208
180 ret = render_state_init(so, engine->dev); 209 ret = render_state_init(so, engine->i915);
181 if (ret) 210 if (ret)
182 return ret; 211 return ret;
183 212
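render_state_init() above also illustrates the driver-wide move from the NULL-returning i915_gem_alloc_object() to the ERR_PTR-returning i915_gem_object_create(). The standard kernel idiom, sketched here with the same allocation size:

	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_create(dev, 4096);
	if (IS_ERR(obj))
		return PTR_ERR(obj);	/* propagate the real errno, not a guessed -ENOMEM */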
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 425e721aac58..538c30499848 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -131,7 +131,16 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
131 unsigned long count = 0; 131 unsigned long count = 0;
132 132
133 trace_i915_gem_shrink(dev_priv, target, flags); 133 trace_i915_gem_shrink(dev_priv, target, flags);
134 i915_gem_retire_requests(dev_priv->dev); 134 i915_gem_retire_requests(dev_priv);
135
136 /*
 137 * Unbinding of objects will require HW access; let us not wake the
138 * device just to recover a little memory. If absolutely necessary,
139 * we will force the wake during oom-notifier.
140 */
141 if ((flags & I915_SHRINK_BOUND) &&
142 !intel_runtime_pm_get_if_in_use(dev_priv))
143 flags &= ~I915_SHRINK_BOUND;
135 144
136 /* 145 /*
137 * As we may completely rewrite the (un)bound list whilst unbinding 146 * As we may completely rewrite the (un)bound list whilst unbinding
@@ -197,7 +206,10 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
197 list_splice(&still_in_list, phase->list); 206 list_splice(&still_in_list, phase->list);
198 } 207 }
199 208
200 i915_gem_retire_requests(dev_priv->dev); 209 if (flags & I915_SHRINK_BOUND)
210 intel_runtime_pm_put(dev_priv);
211
212 i915_gem_retire_requests(dev_priv);
201 213
202 return count; 214 return count;
203} 215}
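The shrinker change wraps hardware-touching work in a conditional runtime-PM reference: take it only if the device is already awake, otherwise skip the bound list entirely. Consolidating the two hunks above into one sketch of the pattern (functions as used in the diff):

	if ((flags & I915_SHRINK_BOUND) &&
	    !intel_runtime_pm_get_if_in_use(dev_priv))
		flags &= ~I915_SHRINK_BOUND;	/* don't wake the device just to shrink */

	/* ... unbind/scan work ... */

	if (flags & I915_SHRINK_BOUND)
		intel_runtime_pm_put(dev_priv);	/* pairs with the conditional get */

Only the oom notifier, where reclaim is mandatory, forces a wakeup with an unconditional intel_runtime_pm_get().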
@@ -345,7 +357,9 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
345 if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000)) 357 if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
346 return NOTIFY_DONE; 358 return NOTIFY_DONE;
347 359
360 intel_runtime_pm_get(dev_priv);
348 freed_pages = i915_gem_shrink_all(dev_priv); 361 freed_pages = i915_gem_shrink_all(dev_priv);
362 intel_runtime_pm_put(dev_priv);
349 363
350 /* Because we may be allocating inside our own driver, we cannot 364 /* Because we may be allocating inside our own driver, we cannot
351 * assert that there are no objects with pinned pages that are not 365 * assert that there are no objects with pinned pages that are not
@@ -386,17 +400,35 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
386 struct drm_i915_private *dev_priv = 400 struct drm_i915_private *dev_priv =
387 container_of(nb, struct drm_i915_private, mm.vmap_notifier); 401 container_of(nb, struct drm_i915_private, mm.vmap_notifier);
388 struct shrinker_lock_uninterruptible slu; 402 struct shrinker_lock_uninterruptible slu;
389 unsigned long freed_pages; 403 struct i915_vma *vma, *next;
404 unsigned long freed_pages = 0;
405 int ret;
390 406
391 if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000)) 407 if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
392 return NOTIFY_DONE; 408 return NOTIFY_DONE;
393 409
394 freed_pages = i915_gem_shrink(dev_priv, -1UL, 410 /* Force everything onto the inactive lists */
395 I915_SHRINK_BOUND | 411 ret = i915_gpu_idle(dev_priv->dev);
396 I915_SHRINK_UNBOUND | 412 if (ret)
397 I915_SHRINK_ACTIVE | 413 goto out;
398 I915_SHRINK_VMAPS); 414
415 intel_runtime_pm_get(dev_priv);
416 freed_pages += i915_gem_shrink(dev_priv, -1UL,
417 I915_SHRINK_BOUND |
418 I915_SHRINK_UNBOUND |
419 I915_SHRINK_ACTIVE |
420 I915_SHRINK_VMAPS);
421 intel_runtime_pm_put(dev_priv);
422
423 /* We also want to clear any cached iomaps as they wrap vmap */
424 list_for_each_entry_safe(vma, next,
425 &dev_priv->ggtt.base.inactive_list, vm_link) {
426 unsigned long count = vma->node.size >> PAGE_SHIFT;
427 if (vma->iomap && i915_vma_unbind(vma) == 0)
428 freed_pages += count;
429 }
399 430
431out:
400 i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu); 432 i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
401 433
402 *(unsigned long *)ptr += freed_pages; 434 *(unsigned long *)ptr += freed_pages;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index b7ce963fb8f8..e9cd82290408 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -55,8 +55,10 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
55 return -ENODEV; 55 return -ENODEV;
56 56
57 /* See the comment at the drm_mm_init() call for more about this check. 57 /* See the comment at the drm_mm_init() call for more about this check.
58 * WaSkipStolenMemoryFirstPage:bdw,chv (incomplete) */ 58 * WaSkipStolenMemoryFirstPage:bdw,chv,kbl (incomplete)
59 if (INTEL_INFO(dev_priv)->gen == 8 && start < 4096) 59 */
60 if (start < 4096 && (IS_GEN8(dev_priv) ||
61 IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)))
60 start = 4096; 62 start = 4096;
61 63
62 mutex_lock(&dev_priv->mm.stolen_lock); 64 mutex_lock(&dev_priv->mm.stolen_lock);
@@ -109,9 +111,9 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
109 if (INTEL_INFO(dev)->gen >= 3) { 111 if (INTEL_INFO(dev)->gen >= 3) {
110 u32 bsm; 112 u32 bsm;
111 113
112 pci_read_config_dword(dev->pdev, BSM, &bsm); 114 pci_read_config_dword(dev->pdev, INTEL_BSM, &bsm);
113 115
114 base = bsm & BSM_MASK; 116 base = bsm & INTEL_BSM_MASK;
115 } else if (IS_I865G(dev)) { 117 } else if (IS_I865G(dev)) {
116 u16 toud = 0; 118 u16 toud = 0;
117 119
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index b9bdb34032cd..a6eb5c47a49c 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -125,7 +125,7 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
125 if (INTEL_INFO(obj->base.dev)->gen >= 4) 125 if (INTEL_INFO(obj->base.dev)->gen >= 4)
126 return true; 126 return true;
127 127
128 if (INTEL_INFO(obj->base.dev)->gen == 3) { 128 if (IS_GEN3(obj->base.dev)) {
129 if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) 129 if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
130 return false; 130 return false;
131 } else { 131 } else {
@@ -229,7 +229,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
229 */ 229 */
230 if (obj->map_and_fenceable && 230 if (obj->map_and_fenceable &&
231 !i915_gem_object_fence_ok(obj, args->tiling_mode)) 231 !i915_gem_object_fence_ok(obj, args->tiling_mode))
232 ret = i915_gem_object_ggtt_unbind(obj); 232 ret = i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
233 233
234 if (ret == 0) { 234 if (ret == 0) {
235 if (obj->pages && 235 if (obj->pages &&
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 32d9726e38b1..2314c88323e3 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -706,7 +706,8 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
706static void 706static void
707i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj) 707i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
708{ 708{
709 struct sg_page_iter sg_iter; 709 struct sgt_iter sgt_iter;
710 struct page *page;
710 711
711 BUG_ON(obj->userptr.work != NULL); 712 BUG_ON(obj->userptr.work != NULL);
712 __i915_gem_userptr_set_active(obj, false); 713 __i915_gem_userptr_set_active(obj, false);
@@ -716,9 +717,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
716 717
717 i915_gem_gtt_finish_object(obj); 718 i915_gem_gtt_finish_object(obj);
718 719
719 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { 720 for_each_sgt_page(page, sgt_iter, obj->pages) {
720 struct page *page = sg_page_iter_page(&sg_iter);
721
722 if (obj->dirty) 721 if (obj->dirty)
723 set_page_dirty(page); 722 set_page_dirty(page);
724 723
@@ -855,11 +854,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
855 return 0; 854 return 0;
856} 855}
857 856
858int 857void i915_gem_init_userptr(struct drm_i915_private *dev_priv)
859i915_gem_init_userptr(struct drm_device *dev)
860{ 858{
861 struct drm_i915_private *dev_priv = to_i915(dev);
862 mutex_init(&dev_priv->mm_lock); 859 mutex_init(&dev_priv->mm_lock);
863 hash_init(dev_priv->mm_structs); 860 hash_init(dev_priv->mm_structs);
864 return 0;
865} 861}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 89725c9efc25..34ff2459ceea 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -411,7 +411,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
411 err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); 411 err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
412 } 412 }
413 413
414 if (INTEL_INFO(dev)->gen == 7) 414 if (IS_GEN7(dev))
415 err_printf(m, "ERR_INT: 0x%08x\n", error->err_int); 415 err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
416 416
417 for (i = 0; i < ARRAY_SIZE(error->ring); i++) 417 for (i = 0; i < ARRAY_SIZE(error->ring); i++)
@@ -824,19 +824,18 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
824 return error_code; 824 return error_code;
825} 825}
826 826
827static void i915_gem_record_fences(struct drm_device *dev, 827static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
828 struct drm_i915_error_state *error) 828 struct drm_i915_error_state *error)
829{ 829{
830 struct drm_i915_private *dev_priv = dev->dev_private;
831 int i; 830 int i;
832 831
833 if (IS_GEN3(dev) || IS_GEN2(dev)) { 832 if (IS_GEN3(dev_priv) || IS_GEN2(dev_priv)) {
834 for (i = 0; i < dev_priv->num_fence_regs; i++) 833 for (i = 0; i < dev_priv->num_fence_regs; i++)
835 error->fence[i] = I915_READ(FENCE_REG(i)); 834 error->fence[i] = I915_READ(FENCE_REG(i));
836 } else if (IS_GEN5(dev) || IS_GEN4(dev)) { 835 } else if (IS_GEN5(dev_priv) || IS_GEN4(dev_priv)) {
837 for (i = 0; i < dev_priv->num_fence_regs; i++) 836 for (i = 0; i < dev_priv->num_fence_regs; i++)
838 error->fence[i] = I915_READ64(FENCE_REG_965_LO(i)); 837 error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
839 } else if (INTEL_INFO(dev)->gen >= 6) { 838 } else if (INTEL_GEN(dev_priv) >= 6) {
840 for (i = 0; i < dev_priv->num_fence_regs; i++) 839 for (i = 0; i < dev_priv->num_fence_regs; i++)
841 error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i)); 840 error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
842 } 841 }
@@ -851,7 +850,7 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
851 struct intel_engine_cs *to; 850 struct intel_engine_cs *to;
852 enum intel_engine_id id; 851 enum intel_engine_id id;
853 852
854 if (!i915_semaphore_is_enabled(dev_priv->dev)) 853 if (!i915_semaphore_is_enabled(dev_priv))
855 return; 854 return;
856 855
857 if (!error->semaphore_obj) 856 if (!error->semaphore_obj)
@@ -893,31 +892,29 @@ static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
893 } 892 }
894} 893}
895 894
896static void i915_record_ring_state(struct drm_device *dev, 895static void i915_record_ring_state(struct drm_i915_private *dev_priv,
897 struct drm_i915_error_state *error, 896 struct drm_i915_error_state *error,
898 struct intel_engine_cs *engine, 897 struct intel_engine_cs *engine,
899 struct drm_i915_error_ring *ering) 898 struct drm_i915_error_ring *ering)
900{ 899{
901 struct drm_i915_private *dev_priv = dev->dev_private; 900 if (INTEL_GEN(dev_priv) >= 6) {
902
903 if (INTEL_INFO(dev)->gen >= 6) {
904 ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base)); 901 ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
905 ering->fault_reg = I915_READ(RING_FAULT_REG(engine)); 902 ering->fault_reg = I915_READ(RING_FAULT_REG(engine));
906 if (INTEL_INFO(dev)->gen >= 8) 903 if (INTEL_GEN(dev_priv) >= 8)
907 gen8_record_semaphore_state(dev_priv, error, engine, 904 gen8_record_semaphore_state(dev_priv, error, engine,
908 ering); 905 ering);
909 else 906 else
910 gen6_record_semaphore_state(dev_priv, engine, ering); 907 gen6_record_semaphore_state(dev_priv, engine, ering);
911 } 908 }
912 909
913 if (INTEL_INFO(dev)->gen >= 4) { 910 if (INTEL_GEN(dev_priv) >= 4) {
914 ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base)); 911 ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
915 ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base)); 912 ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
916 ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base)); 913 ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
917 ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base)); 914 ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
918 ering->instps = I915_READ(RING_INSTPS(engine->mmio_base)); 915 ering->instps = I915_READ(RING_INSTPS(engine->mmio_base));
919 ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base)); 916 ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
920 if (INTEL_INFO(dev)->gen >= 8) { 917 if (INTEL_GEN(dev_priv) >= 8) {
921 ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32; 918 ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
922 ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32; 919 ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
923 } 920 }
@@ -939,10 +936,10 @@ static void i915_record_ring_state(struct drm_device *dev,
939 ering->tail = I915_READ_TAIL(engine); 936 ering->tail = I915_READ_TAIL(engine);
940 ering->ctl = I915_READ_CTL(engine); 937 ering->ctl = I915_READ_CTL(engine);
941 938
942 if (I915_NEED_GFX_HWS(dev)) { 939 if (I915_NEED_GFX_HWS(dev_priv)) {
943 i915_reg_t mmio; 940 i915_reg_t mmio;
944 941
945 if (IS_GEN7(dev)) { 942 if (IS_GEN7(dev_priv)) {
946 switch (engine->id) { 943 switch (engine->id) {
947 default: 944 default:
948 case RCS: 945 case RCS:
@@ -958,7 +955,7 @@ static void i915_record_ring_state(struct drm_device *dev,
958 mmio = VEBOX_HWS_PGA_GEN7; 955 mmio = VEBOX_HWS_PGA_GEN7;
959 break; 956 break;
960 } 957 }
961 } else if (IS_GEN6(engine->dev)) { 958 } else if (IS_GEN6(engine->i915)) {
962 mmio = RING_HWS_PGA_GEN6(engine->mmio_base); 959 mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
963 } else { 960 } else {
964 /* XXX: gen8 returns to sanity */ 961 /* XXX: gen8 returns to sanity */
@@ -971,18 +968,18 @@ static void i915_record_ring_state(struct drm_device *dev,
971 ering->hangcheck_score = engine->hangcheck.score; 968 ering->hangcheck_score = engine->hangcheck.score;
972 ering->hangcheck_action = engine->hangcheck.action; 969 ering->hangcheck_action = engine->hangcheck.action;
973 970
974 if (USES_PPGTT(dev)) { 971 if (USES_PPGTT(dev_priv)) {
975 int i; 972 int i;
976 973
977 ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine)); 974 ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
978 975
979 if (IS_GEN6(dev)) 976 if (IS_GEN6(dev_priv))
980 ering->vm_info.pp_dir_base = 977 ering->vm_info.pp_dir_base =
981 I915_READ(RING_PP_DIR_BASE_READ(engine)); 978 I915_READ(RING_PP_DIR_BASE_READ(engine));
982 else if (IS_GEN7(dev)) 979 else if (IS_GEN7(dev_priv))
983 ering->vm_info.pp_dir_base = 980 ering->vm_info.pp_dir_base =
984 I915_READ(RING_PP_DIR_BASE(engine)); 981 I915_READ(RING_PP_DIR_BASE(engine));
985 else if (INTEL_INFO(dev)->gen >= 8) 982 else if (INTEL_GEN(dev_priv) >= 8)
986 for (i = 0; i < 4; i++) { 983 for (i = 0; i < 4; i++) {
987 ering->vm_info.pdp[i] = 984 ering->vm_info.pdp[i] =
988 I915_READ(GEN8_RING_PDP_UDW(engine, i)); 985 I915_READ(GEN8_RING_PDP_UDW(engine, i));
@@ -998,7 +995,7 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine,
998 struct drm_i915_error_state *error, 995 struct drm_i915_error_state *error,
999 struct drm_i915_error_ring *ering) 996 struct drm_i915_error_ring *ering)
1000{ 997{
1001 struct drm_i915_private *dev_priv = engine->dev->dev_private; 998 struct drm_i915_private *dev_priv = engine->i915;
1002 struct drm_i915_gem_object *obj; 999 struct drm_i915_gem_object *obj;
1003 1000
1004 /* Currently render ring is the only HW context user */ 1001 /* Currently render ring is the only HW context user */
@@ -1016,10 +1013,9 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine,
1016 } 1013 }
1017} 1014}
1018 1015
1019static void i915_gem_record_rings(struct drm_device *dev, 1016static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
1020 struct drm_i915_error_state *error) 1017 struct drm_i915_error_state *error)
1021{ 1018{
1022 struct drm_i915_private *dev_priv = to_i915(dev);
1023 struct i915_ggtt *ggtt = &dev_priv->ggtt; 1019 struct i915_ggtt *ggtt = &dev_priv->ggtt;
1024 struct drm_i915_gem_request *request; 1020 struct drm_i915_gem_request *request;
1025 int i, count; 1021 int i, count;
@@ -1030,12 +1026,12 @@ static void i915_gem_record_rings(struct drm_device *dev,
1030 1026
1031 error->ring[i].pid = -1; 1027 error->ring[i].pid = -1;
1032 1028
1033 if (engine->dev == NULL) 1029 if (!intel_engine_initialized(engine))
1034 continue; 1030 continue;
1035 1031
1036 error->ring[i].valid = true; 1032 error->ring[i].valid = true;
1037 1033
1038 i915_record_ring_state(dev, error, engine, &error->ring[i]); 1034 i915_record_ring_state(dev_priv, error, engine, &error->ring[i]);
1039 1035
1040 request = i915_gem_find_active_request(engine); 1036 request = i915_gem_find_active_request(engine);
1041 if (request) { 1037 if (request) {
@@ -1301,15 +1297,14 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
1301 error->eir = I915_READ(EIR); 1297 error->eir = I915_READ(EIR);
1302 error->pgtbl_er = I915_READ(PGTBL_ER); 1298 error->pgtbl_er = I915_READ(PGTBL_ER);
1303 1299
1304 i915_get_extra_instdone(dev, error->extra_instdone); 1300 i915_get_extra_instdone(dev_priv, error->extra_instdone);
1305} 1301}
1306 1302
1307static void i915_error_capture_msg(struct drm_device *dev, 1303static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
1308 struct drm_i915_error_state *error, 1304 struct drm_i915_error_state *error,
1309 u32 engine_mask, 1305 u32 engine_mask,
1310 const char *error_msg) 1306 const char *error_msg)
1311{ 1307{
1312 struct drm_i915_private *dev_priv = dev->dev_private;
1313 u32 ecode; 1308 u32 ecode;
1314 int ring_id = -1, len; 1309 int ring_id = -1, len;
1315 1310
@@ -1317,7 +1312,7 @@ static void i915_error_capture_msg(struct drm_device *dev,
1317 1312
1318 len = scnprintf(error->error_msg, sizeof(error->error_msg), 1313 len = scnprintf(error->error_msg, sizeof(error->error_msg),
1319 "GPU HANG: ecode %d:%d:0x%08x", 1314 "GPU HANG: ecode %d:%d:0x%08x",
1320 INTEL_INFO(dev)->gen, ring_id, ecode); 1315 INTEL_GEN(dev_priv), ring_id, ecode);
1321 1316
1322 if (ring_id != -1 && error->ring[ring_id].pid != -1) 1317 if (ring_id != -1 && error->ring[ring_id].pid != -1)
1323 len += scnprintf(error->error_msg + len, 1318 len += scnprintf(error->error_msg + len,
@@ -1352,11 +1347,11 @@ static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
1352 * out a structure which becomes available in debugfs for user level tools 1347 * out a structure which becomes available in debugfs for user level tools
1353 * to pick up. 1348 * to pick up.
1354 */ 1349 */
1355void i915_capture_error_state(struct drm_device *dev, u32 engine_mask, 1350void i915_capture_error_state(struct drm_i915_private *dev_priv,
1351 u32 engine_mask,
1356 const char *error_msg) 1352 const char *error_msg)
1357{ 1353{
1358 static bool warned; 1354 static bool warned;
1359 struct drm_i915_private *dev_priv = dev->dev_private;
1360 struct drm_i915_error_state *error; 1355 struct drm_i915_error_state *error;
1361 unsigned long flags; 1356 unsigned long flags;
1362 1357
@@ -1372,15 +1367,15 @@ void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
1372 i915_capture_gen_state(dev_priv, error); 1367 i915_capture_gen_state(dev_priv, error);
1373 i915_capture_reg_state(dev_priv, error); 1368 i915_capture_reg_state(dev_priv, error);
1374 i915_gem_capture_buffers(dev_priv, error); 1369 i915_gem_capture_buffers(dev_priv, error);
1375 i915_gem_record_fences(dev, error); 1370 i915_gem_record_fences(dev_priv, error);
1376 i915_gem_record_rings(dev, error); 1371 i915_gem_record_rings(dev_priv, error);
1377 1372
1378 do_gettimeofday(&error->time); 1373 do_gettimeofday(&error->time);
1379 1374
1380 error->overlay = intel_overlay_capture_error_state(dev); 1375 error->overlay = intel_overlay_capture_error_state(dev_priv);
1381 error->display = intel_display_capture_error_state(dev); 1376 error->display = intel_display_capture_error_state(dev_priv);
1382 1377
1383 i915_error_capture_msg(dev, error, engine_mask, error_msg); 1378 i915_error_capture_msg(dev_priv, error, engine_mask, error_msg);
1384 DRM_INFO("%s\n", error->error_msg); 1379 DRM_INFO("%s\n", error->error_msg);
1385 1380
1386 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); 1381 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
@@ -1400,7 +1395,7 @@ void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
1400 DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n"); 1395 DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
1401 DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n"); 1396 DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
1402 DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n"); 1397 DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
1403 DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index); 1398 DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev_priv->dev->primary->index);
1404 warned = true; 1399 warned = true;
1405 } 1400 }
1406} 1401}
@@ -1450,17 +1445,17 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
1450} 1445}
1451 1446
1452/* NB: please notice the memset */ 1447/* NB: please notice the memset */
1453void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone) 1448void i915_get_extra_instdone(struct drm_i915_private *dev_priv,
1449 uint32_t *instdone)
1454{ 1450{
1455 struct drm_i915_private *dev_priv = dev->dev_private;
1456 memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG); 1451 memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
1457 1452
1458 if (IS_GEN2(dev) || IS_GEN3(dev)) 1453 if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
1459 instdone[0] = I915_READ(GEN2_INSTDONE); 1454 instdone[0] = I915_READ(GEN2_INSTDONE);
1460 else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) { 1455 else if (IS_GEN4(dev_priv) || IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) {
1461 instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE)); 1456 instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
1462 instdone[1] = I915_READ(GEN4_INSTDONE1); 1457 instdone[1] = I915_READ(GEN4_INSTDONE1);
1463 } else if (INTEL_INFO(dev)->gen >= 7) { 1458 } else if (INTEL_GEN(dev_priv) >= 7) {
1464 instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE)); 1459 instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
1465 instdone[1] = I915_READ(GEN7_SC_INSTDONE); 1460 instdone[1] = I915_READ(GEN7_SC_INSTDONE);
1466 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); 1461 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
diff --git a/drivers/gpu/drm/i915/i915_guc_reg.h b/drivers/gpu/drm/i915/i915_guc_reg.h
index 80786d9f9ad3..cf5a65be4fe0 100644
--- a/drivers/gpu/drm/i915/i915_guc_reg.h
+++ b/drivers/gpu/drm/i915/i915_guc_reg.h
@@ -67,11 +67,11 @@
67#define GUC_WOPCM_OFFSET_VALUE 0x80000 /* 512KB */ 67#define GUC_WOPCM_OFFSET_VALUE 0x80000 /* 512KB */
68#define GUC_MAX_IDLE_COUNT _MMIO(0xC3E4) 68#define GUC_MAX_IDLE_COUNT _MMIO(0xC3E4)
69 69
70/* Defines WOPCM space available to GuC firmware */
70#define GUC_WOPCM_SIZE _MMIO(0xc050) 71#define GUC_WOPCM_SIZE _MMIO(0xc050)
71#define GUC_WOPCM_SIZE_VALUE (0x80 << 12) /* 512KB */
72
73/* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */ 72/* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */
74#define GUC_WOPCM_TOP (GUC_WOPCM_SIZE_VALUE) 73#define GUC_WOPCM_TOP (0x80 << 12) /* 512KB */
74#define BXT_GUC_WOPCM_RC6_RESERVED (0x10 << 12) /* 64KB */
75 75
76#define GEN8_GT_PM_CONFIG _MMIO(0x138140) 76#define GEN8_GT_PM_CONFIG _MMIO(0x138140)
77#define GEN9LP_GT_PM_CONFIG _MMIO(0x138140) 77#define GEN9LP_GT_PM_CONFIG _MMIO(0x138140)
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index d40c13fb6643..22a55ac4e51c 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -158,8 +158,7 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
158 158
159 data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE; 159 data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
160 /* WaRsDisableCoarsePowerGating:skl,bxt */ 160 /* WaRsDisableCoarsePowerGating:skl,bxt */
161 if (!intel_enable_rc6(dev) || 161 if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev))
162 NEEDS_WaRsDisableCoarsePowerGating(dev))
163 data[1] = 0; 162 data[1] = 0;
164 else 163 else
165 /* bit 0 and 1 are for Render and Media domain separately */ 164 /* bit 0 and 1 are for Render and Media domain separately */
@@ -175,94 +174,88 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
175 * client object which contains the page being used for the doorbell 174 * client object which contains the page being used for the doorbell
176 */ 175 */
177 176
178static void guc_init_doorbell(struct intel_guc *guc, 177static int guc_update_doorbell_id(struct intel_guc *guc,
179 struct i915_guc_client *client) 178 struct i915_guc_client *client,
179 u16 new_id)
180{ 180{
181 struct sg_table *sg = guc->ctx_pool_obj->pages;
182 void *doorbell_bitmap = guc->doorbell_bitmap;
181 struct guc_doorbell_info *doorbell; 183 struct guc_doorbell_info *doorbell;
184 struct guc_context_desc desc;
185 size_t len;
182 186
183 doorbell = client->client_base + client->doorbell_offset; 187 doorbell = client->client_base + client->doorbell_offset;
184 188
185 doorbell->db_status = GUC_DOORBELL_ENABLED; 189 if (client->doorbell_id != GUC_INVALID_DOORBELL_ID &&
186 doorbell->cookie = 0; 190 test_bit(client->doorbell_id, doorbell_bitmap)) {
187} 191 /* Deactivate the old doorbell */
188 192 doorbell->db_status = GUC_DOORBELL_DISABLED;
189static int guc_ring_doorbell(struct i915_guc_client *gc) 193 (void)host2guc_release_doorbell(guc, client);
190{ 194 __clear_bit(client->doorbell_id, doorbell_bitmap);
191 struct guc_process_desc *desc; 195 }
192 union guc_doorbell_qw db_cmp, db_exc, db_ret;
193 union guc_doorbell_qw *db;
194 int attempt = 2, ret = -EAGAIN;
195
196 desc = gc->client_base + gc->proc_desc_offset;
197
198 /* Update the tail so it is visible to GuC */
199 desc->tail = gc->wq_tail;
200
201 /* current cookie */
202 db_cmp.db_status = GUC_DOORBELL_ENABLED;
203 db_cmp.cookie = gc->cookie;
204
205 /* cookie to be updated */
206 db_exc.db_status = GUC_DOORBELL_ENABLED;
207 db_exc.cookie = gc->cookie + 1;
208 if (db_exc.cookie == 0)
209 db_exc.cookie = 1;
210
211 /* pointer of current doorbell cacheline */
212 db = gc->client_base + gc->doorbell_offset;
213
214 while (attempt--) {
215 /* lets ring the doorbell */
216 db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db,
217 db_cmp.value_qw, db_exc.value_qw);
218
219 /* if the exchange was successfully executed */
220 if (db_ret.value_qw == db_cmp.value_qw) {
221 /* db was successfully rung */
222 gc->cookie = db_exc.cookie;
223 ret = 0;
224 break;
225 }
226 196
227 /* XXX: doorbell was lost and need to acquire it again */ 197 /* Update the GuC's idea of the doorbell ID */
228 if (db_ret.db_status == GUC_DOORBELL_DISABLED) 198 len = sg_pcopy_to_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
229 break; 199 sizeof(desc) * client->ctx_index);
200 if (len != sizeof(desc))
201 return -EFAULT;
202 desc.db_id = new_id;
203 len = sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
204 sizeof(desc) * client->ctx_index);
205 if (len != sizeof(desc))
206 return -EFAULT;
230 207
231 DRM_ERROR("Cookie mismatch. Expected %d, returned %d\n", 208 client->doorbell_id = new_id;
232 db_cmp.cookie, db_ret.cookie); 209 if (new_id == GUC_INVALID_DOORBELL_ID)
210 return 0;
233 211
234 /* update the cookie to newly read cookie from GuC */ 212 /* Activate the new doorbell */
235 db_cmp.cookie = db_ret.cookie; 213 __set_bit(new_id, doorbell_bitmap);
236 db_exc.cookie = db_ret.cookie + 1; 214 doorbell->cookie = 0;
237 if (db_exc.cookie == 0) 215 doorbell->db_status = GUC_DOORBELL_ENABLED;
238 db_exc.cookie = 1; 216 return host2guc_allocate_doorbell(guc, client);
239 } 217}
240 218
241 return ret; 219static int guc_init_doorbell(struct intel_guc *guc,
220 struct i915_guc_client *client,
221 uint16_t db_id)
222{
223 return guc_update_doorbell_id(guc, client, db_id);
242} 224}
243 225
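guc_update_doorbell_id() updates a single descriptor in place inside the sg-backed context pool. The read-modify-write idiom it relies on, sketched with the sizes used above (note that sg_pcopy_to_buffer() copies *out of* the scatterlist despite its name):

	struct guc_context_desc desc;
	size_t offset = sizeof(desc) * client->ctx_index;

	/* read the descriptor out of the sg-backed object */
	sg_pcopy_to_buffer(sg->sgl, sg->nents, &desc, sizeof(desc), offset);
	desc.db_id = new_id;			/* modify the one field */
	/* write it back at the same offset */
	sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc), offset);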
244static void guc_disable_doorbell(struct intel_guc *guc, 226static void guc_disable_doorbell(struct intel_guc *guc,
245 struct i915_guc_client *client) 227 struct i915_guc_client *client)
246{ 228{
247 struct drm_i915_private *dev_priv = guc_to_i915(guc); 229 (void)guc_update_doorbell_id(guc, client, GUC_INVALID_DOORBELL_ID);
248 struct guc_doorbell_info *doorbell;
249 i915_reg_t drbreg = GEN8_DRBREGL(client->doorbell_id);
250 int value;
251
252 doorbell = client->client_base + client->doorbell_offset;
253 230
254 doorbell->db_status = GUC_DOORBELL_DISABLED; 231 /* XXX: wait for any interrupts */
232 /* XXX: wait for workqueue to drain */
233}
255 234
256 I915_WRITE(drbreg, I915_READ(drbreg) & ~GEN8_DRB_VALID); 235static uint16_t
236select_doorbell_register(struct intel_guc *guc, uint32_t priority)
237{
238 /*
239 * The bitmap tracks which doorbell registers are currently in use.
240 * It is split into two halves; the first half is used for normal
241 * priority contexts, the second half for high-priority ones.
242 * Note that logically higher priorities are numerically less than
243 * normal ones, so the test below means "is it high-priority?"
244 */
245 const bool hi_pri = (priority <= GUC_CTX_PRIORITY_HIGH);
246 const uint16_t half = GUC_MAX_DOORBELLS / 2;
247 const uint16_t start = hi_pri ? half : 0;
248 const uint16_t end = start + half;
249 uint16_t id;
257 250
258 value = I915_READ(drbreg); 251 id = find_next_zero_bit(guc->doorbell_bitmap, end, start);
259 WARN_ON((value & GEN8_DRB_VALID) != 0); 252 if (id == end)
253 id = GUC_INVALID_DOORBELL_ID;
260 254
261 I915_WRITE(GEN8_DRBREGU(client->doorbell_id), 0); 255 DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n",
262 I915_WRITE(drbreg, 0); 256 hi_pri ? "high" : "normal", id);
263 257
264 /* XXX: wait for any interrupts */ 258 return id;
265 /* XXX: wait for workqueue to drain */
266} 259}
267 260
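A worked example of the split-bitmap selection, assuming GUC_MAX_DOORBELLS is 256 as defined in this driver:

	/* normal priority -> search ids [0, 128)
	 * high priority   -> search ids [128, 256)
	 * find_next_zero_bit() returns 'end' when the chosen half is full,
	 * which select_doorbell_register() maps to GUC_INVALID_DOORBELL_ID.
	 */
	id = find_next_zero_bit(guc->doorbell_bitmap, end, start);
	if (id == end)
		id = GUC_INVALID_DOORBELL_ID;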
268/* 261/*
@@ -289,37 +282,6 @@ static uint32_t select_doorbell_cacheline(struct intel_guc *guc)
289 return offset; 282 return offset;
290} 283}
291 284
292static uint16_t assign_doorbell(struct intel_guc *guc, uint32_t priority)
293{
294 /*
295 * The bitmap is split into two halves; the first half is used for
296 * normal priority contexts, the second half for high-priority ones.
297 * Note that logically higher priorities are numerically less than
298 * normal ones, so the test below means "is it high-priority?"
299 */
300 const bool hi_pri = (priority <= GUC_CTX_PRIORITY_HIGH);
301 const uint16_t half = GUC_MAX_DOORBELLS / 2;
302 const uint16_t start = hi_pri ? half : 0;
303 const uint16_t end = start + half;
304 uint16_t id;
305
306 id = find_next_zero_bit(guc->doorbell_bitmap, end, start);
307 if (id == end)
308 id = GUC_INVALID_DOORBELL_ID;
309 else
310 bitmap_set(guc->doorbell_bitmap, id, 1);
311
312 DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n",
313 hi_pri ? "high" : "normal", id);
314
315 return id;
316}
317
318static void release_doorbell(struct intel_guc *guc, uint16_t id)
319{
320 bitmap_clear(guc->doorbell_bitmap, id, 1);
321}
322
323/* 285/*
324 * Initialise the process descriptor shared with the GuC firmware. 286 * Initialise the process descriptor shared with the GuC firmware.
325 */ 287 */
@@ -361,10 +323,9 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
361 struct drm_i915_gem_object *client_obj = client->client_obj; 323 struct drm_i915_gem_object *client_obj = client->client_obj;
362 struct drm_i915_private *dev_priv = guc_to_i915(guc); 324 struct drm_i915_private *dev_priv = guc_to_i915(guc);
363 struct intel_engine_cs *engine; 325 struct intel_engine_cs *engine;
364 struct intel_context *ctx = client->owner; 326 struct i915_gem_context *ctx = client->owner;
365 struct guc_context_desc desc; 327 struct guc_context_desc desc;
366 struct sg_table *sg; 328 struct sg_table *sg;
367 enum intel_engine_id id;
368 u32 gfx_addr; 329 u32 gfx_addr;
369 330
370 memset(&desc, 0, sizeof(desc)); 331 memset(&desc, 0, sizeof(desc));
@@ -374,10 +335,10 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
374 desc.priority = client->priority; 335 desc.priority = client->priority;
375 desc.db_id = client->doorbell_id; 336 desc.db_id = client->doorbell_id;
376 337
377 for_each_engine_id(engine, dev_priv, id) { 338 for_each_engine(engine, dev_priv) {
339 struct intel_context *ce = &ctx->engine[engine->id];
378 struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id]; 340 struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id];
379 struct drm_i915_gem_object *obj; 341 struct drm_i915_gem_object *obj;
380 uint64_t ctx_desc;
381 342
382 /* TODO: We have a design issue to be solved here. Only when we 343 /* TODO: We have a design issue to be solved here. Only when we
383 * receive the first batch, we know which engine is used by the 344 * receive the first batch, we know which engine is used by the
@@ -386,20 +347,18 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
386 * for now who owns a GuC client. But for future owner of GuC 347 * for now who owns a GuC client. But for future owner of GuC
387 * client, need to make sure lrc is pinned prior to enter here. 348 * client, need to make sure lrc is pinned prior to enter here.
388 */ 349 */
389 obj = ctx->engine[id].state; 350 if (!ce->state)
390 if (!obj)
391 break; /* XXX: continue? */ 351 break; /* XXX: continue? */
392 352
393 ctx_desc = intel_lr_context_descriptor(ctx, engine); 353 lrc->context_desc = lower_32_bits(ce->lrc_desc);
394 lrc->context_desc = (u32)ctx_desc;
395 354
396 /* The state page is after PPHWSP */ 355 /* The state page is after PPHWSP */
397 gfx_addr = i915_gem_obj_ggtt_offset(obj); 356 gfx_addr = i915_gem_obj_ggtt_offset(ce->state);
398 lrc->ring_lcra = gfx_addr + LRC_STATE_PN * PAGE_SIZE; 357 lrc->ring_lcra = gfx_addr + LRC_STATE_PN * PAGE_SIZE;
399 lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) | 358 lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
400 (engine->guc_id << GUC_ELC_ENGINE_OFFSET); 359 (engine->guc_id << GUC_ELC_ENGINE_OFFSET);
401 360
402 obj = ctx->engine[id].ringbuf->obj; 361 obj = ce->ringbuf->obj;
403 gfx_addr = i915_gem_obj_ggtt_offset(obj); 362 gfx_addr = i915_gem_obj_ggtt_offset(obj);
404 363
405 lrc->ring_begin = gfx_addr; 364 lrc->ring_begin = gfx_addr;
@@ -427,7 +386,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
427 desc.wq_size = client->wq_size; 386 desc.wq_size = client->wq_size;
428 387
429 /* 388 /*
430 * XXX: Take LRCs from an existing intel_context if this is not an 389 * XXX: Take LRCs from an existing context if this is not an
431 * IsKMDCreatedContext client 390 * IsKMDCreatedContext client
432 */ 391 */
433 desc.desc_private = (uintptr_t)client; 392 desc.desc_private = (uintptr_t)client;
@@ -451,47 +410,64 @@ static void guc_fini_ctx_desc(struct intel_guc *guc,
451 sizeof(desc) * client->ctx_index); 410 sizeof(desc) * client->ctx_index);
452} 411}
453 412
454int i915_guc_wq_check_space(struct i915_guc_client *gc) 413/**
414 * i915_guc_wq_check_space() - check that the GuC can accept a request
415 * @request: request associated with the commands
416 *
417 * Return: 0 if space is available
418 * -EAGAIN if space is not currently available
419 *
420 * This function must be called (and must return 0) before a request
421 * is submitted to the GuC via i915_guc_submit() below. Once a result
422 * of 0 has been returned, it remains valid until (but only until)
423 * the next call to submit().
424 *
425 * This precheck allows the caller to determine in advance that space
426 * will be available for the next submission before committing resources
427 * to it, and helps avoid late failures with complicated recovery paths.
428 */
429int i915_guc_wq_check_space(struct drm_i915_gem_request *request)
455{ 430{
431 const size_t wqi_size = sizeof(struct guc_wq_item);
432 struct i915_guc_client *gc = request->i915->guc.execbuf_client;
456 struct guc_process_desc *desc; 433 struct guc_process_desc *desc;
457 u32 size = sizeof(struct guc_wq_item); 434 u32 freespace;
458 int ret = -ETIMEDOUT, timeout_counter = 200;
459 435
460 if (!gc) 436 GEM_BUG_ON(gc == NULL);
461 return 0;
462 437
463 desc = gc->client_base + gc->proc_desc_offset; 438 desc = gc->client_base + gc->proc_desc_offset;
464 439
465 while (timeout_counter-- > 0) { 440 freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
466 if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) { 441 if (likely(freespace >= wqi_size))
467 ret = 0; 442 return 0;
468 break;
469 }
470 443
471 if (timeout_counter) 444 gc->no_wq_space += 1;
472 usleep_range(1000, 2000);
473 };
474 445
475 return ret; 446 return -EAGAIN;
476} 447}
477 448
478static int guc_add_workqueue_item(struct i915_guc_client *gc, 449static void guc_add_workqueue_item(struct i915_guc_client *gc,
479 struct drm_i915_gem_request *rq) 450 struct drm_i915_gem_request *rq)
480{ 451{
452 /* wqi_len is in DWords, and does not include the one-word header */
453 const size_t wqi_size = sizeof(struct guc_wq_item);
454 const u32 wqi_len = wqi_size/sizeof(u32) - 1;
481 struct guc_process_desc *desc; 455 struct guc_process_desc *desc;
482 struct guc_wq_item *wqi; 456 struct guc_wq_item *wqi;
483 void *base; 457 void *base;
484 u32 tail, wq_len, wq_off, space; 458 u32 freespace, tail, wq_off, wq_page;
485 459
486 desc = gc->client_base + gc->proc_desc_offset; 460 desc = gc->client_base + gc->proc_desc_offset;
487 space = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
488 if (WARN_ON(space < sizeof(struct guc_wq_item)))
489 return -ENOSPC; /* shouldn't happen */
490 461
491 /* postincrement WQ tail for next time */ 462 /* Free space is guaranteed, see i915_guc_wq_check_space() above */
492 wq_off = gc->wq_tail; 463 freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
493 gc->wq_tail += sizeof(struct guc_wq_item); 464 GEM_BUG_ON(freespace < wqi_size);
494 gc->wq_tail &= gc->wq_size - 1; 465
466 /* The GuC firmware wants the tail index in QWords, not bytes */
467 tail = rq->tail;
468 GEM_BUG_ON(tail & 7);
469 tail >>= 3;
470 GEM_BUG_ON(tail > WQ_RING_TAIL_MAX);
495 471
496 /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we 472 /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
497 * should not have the case where structure wqi is across page, neither 473 * should not have the case where structure wqi is across page, neither
@@ -500,19 +476,23 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
500 * XXX: if not the case, we need save data to a temp wqi and copy it to 476 * XXX: if not the case, we need save data to a temp wqi and copy it to
501 * workqueue buffer dw by dw. 477 * workqueue buffer dw by dw.
502 */ 478 */
503 WARN_ON(sizeof(struct guc_wq_item) != 16); 479 BUILD_BUG_ON(wqi_size != 16);
504 WARN_ON(wq_off & 3); 480
481 /* postincrement WQ tail for next time */
482 wq_off = gc->wq_tail;
483 gc->wq_tail += wqi_size;
484 gc->wq_tail &= gc->wq_size - 1;
485 GEM_BUG_ON(wq_off & (wqi_size - 1));
505 486
506 /* wq starts from the page after doorbell / process_desc */ 487 /* WQ starts from the page after doorbell / process_desc */
507 base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 488 wq_page = (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT;
508 (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT));
509 wq_off &= PAGE_SIZE - 1; 489 wq_off &= PAGE_SIZE - 1;
490 base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, wq_page));
510 wqi = (struct guc_wq_item *)((char *)base + wq_off); 491 wqi = (struct guc_wq_item *)((char *)base + wq_off);
511 492
512 /* len does not include the header */ 493 /* Now fill in the 4-word work queue item */
513 wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1;
514 wqi->header = WQ_TYPE_INORDER | 494 wqi->header = WQ_TYPE_INORDER |
515 (wq_len << WQ_LEN_SHIFT) | 495 (wqi_len << WQ_LEN_SHIFT) |
516 (rq->engine->guc_id << WQ_TARGET_SHIFT) | 496 (rq->engine->guc_id << WQ_TARGET_SHIFT) |
517 WQ_NO_WCFLUSH_WAIT; 497 WQ_NO_WCFLUSH_WAIT;
518 498
@@ -520,48 +500,105 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
520 wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, 500 wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx,
521 rq->engine); 501 rq->engine);
522 502
523 /* The GuC firmware wants the tail index in QWords, not bytes */
524 tail = rq->ringbuf->tail >> 3;
525 wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT; 503 wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT;
526 wqi->fence_id = 0; /*XXX: what fence to be here */ 504 wqi->fence_id = rq->seqno;
527 505
528 kunmap_atomic(base); 506 kunmap_atomic(base);
507}
529 508
530 return 0; 509static int guc_ring_doorbell(struct i915_guc_client *gc)
510{
511 struct guc_process_desc *desc;
512 union guc_doorbell_qw db_cmp, db_exc, db_ret;
513 union guc_doorbell_qw *db;
514 int attempt = 2, ret = -EAGAIN;
515
516 desc = gc->client_base + gc->proc_desc_offset;
517
518 /* Update the tail so it is visible to GuC */
519 desc->tail = gc->wq_tail;
520
521 /* current cookie */
522 db_cmp.db_status = GUC_DOORBELL_ENABLED;
523 db_cmp.cookie = gc->cookie;
524
525 /* cookie to be updated */
526 db_exc.db_status = GUC_DOORBELL_ENABLED;
527 db_exc.cookie = gc->cookie + 1;
528 if (db_exc.cookie == 0)
529 db_exc.cookie = 1;
530
531 /* pointer of current doorbell cacheline */
532 db = gc->client_base + gc->doorbell_offset;
533
534 while (attempt--) {
535 /* lets ring the doorbell */
536 db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db,
537 db_cmp.value_qw, db_exc.value_qw);
538
539 /* if the exchange was successfully executed */
540 if (db_ret.value_qw == db_cmp.value_qw) {
541 /* db was successfully rung */
542 gc->cookie = db_exc.cookie;
543 ret = 0;
544 break;
545 }
546
547 /* XXX: doorbell was lost and need to acquire it again */
548 if (db_ret.db_status == GUC_DOORBELL_DISABLED)
549 break;
550
551 DRM_ERROR("Cookie mismatch. Expected %d, returned %d\n",
552 db_cmp.cookie, db_ret.cookie);
553
554 /* update the cookie to newly read cookie from GuC */
555 db_cmp.cookie = db_ret.cookie;
556 db_exc.cookie = db_ret.cookie + 1;
557 if (db_exc.cookie == 0)
558 db_exc.cookie = 1;
559 }
560
561 return ret;
531} 562}
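guc_ring_doorbell() (moved, not rewritten, in this diff) is a small lock-free protocol: the doorbell cacheline is updated with a 64-bit compare-and-exchange over a {status, cookie} pair, resyncing and retrying once if the GuC changed the cookie underneath us. Condensed to its core, with the "why" spelled out:

	while (attempt--) {
		db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db,
						   db_cmp.value_qw,
						   db_exc.value_qw);
		if (db_ret.value_qw == db_cmp.value_qw) {
			gc->cookie = db_exc.cookie;	/* rung: remember new cookie */
			break;
		}
		/* lost a race with the GuC: adopt its cookie and retry */
		db_cmp.cookie = db_ret.cookie;
		db_exc.cookie = db_ret.cookie + 1;
		if (db_exc.cookie == 0)	/* 0 looks like a fresh doorbell; skip it */
			db_exc.cookie = 1;
	}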
532 563
533/** 564/**
534 * i915_guc_submit() - Submit commands through GuC 565 * i915_guc_submit() - Submit commands through GuC
535 * @client: the guc client where commands will go through
536 * @rq: request associated with the commands 566 * @rq: request associated with the commands
537 * 567 *
538 * Return: 0 if succeed 568 * Return: 0 on success, otherwise an errno.
569 * (Note: nonzero really shouldn't happen!)
570 *
571 * The caller must have already called i915_guc_wq_check_space() above
572 * with a result of 0 (success) since the last request submission. This
573 * guarantees that there is space in the work queue for the new request,
574 * so enqueuing the item cannot fail.
575 *
 576 * Bad Things Will Happen if the caller violates this protocol, e.g. calls
577 * submit() when check() says there's no space, or calls submit() multiple
578 * times with no intervening check().
579 *
580 * The only error here arises if the doorbell hardware isn't functioning
 581 * as expected, which really shouldn't happen.
539 */ 582 */
540int i915_guc_submit(struct i915_guc_client *client, 583int i915_guc_submit(struct drm_i915_gem_request *rq)
541 struct drm_i915_gem_request *rq)
542{ 584{
543 struct intel_guc *guc = client->guc;
544 unsigned int engine_id = rq->engine->guc_id; 585 unsigned int engine_id = rq->engine->guc_id;
545 int q_ret, b_ret; 586 struct intel_guc *guc = &rq->i915->guc;
587 struct i915_guc_client *client = guc->execbuf_client;
588 int b_ret;
546 589
547 q_ret = guc_add_workqueue_item(client, rq); 590 guc_add_workqueue_item(client, rq);
548 if (q_ret == 0) 591 b_ret = guc_ring_doorbell(client);
549 b_ret = guc_ring_doorbell(client);
550 592
551 client->submissions[engine_id] += 1; 593 client->submissions[engine_id] += 1;
552 if (q_ret) { 594 client->retcode = b_ret;
553 client->q_fail += 1; 595 if (b_ret)
554 client->retcode = q_ret;
555 } else if (b_ret) {
556 client->b_fail += 1; 596 client->b_fail += 1;
557 client->retcode = q_ret = b_ret; 597
558 } else {
559 client->retcode = 0;
560 }
561 guc->submissions[engine_id] += 1; 598 guc->submissions[engine_id] += 1;
562 guc->last_seqno[engine_id] = rq->seqno; 599 guc->last_seqno[engine_id] = rq->seqno;
563 600
564 return q_ret; 601 return b_ret;
565} 602}
566 603
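Taken together, the new kerneldoc defines a strict two-step protocol for callers. A sketch of a conforming submission path (the request variable is hypothetical):

	int ret;

	/* step 1: reserve space; a 0 result is valid until the next submit() */
	ret = i915_guc_wq_check_space(req);
	if (ret)		/* -EAGAIN: work queue full, back off and retry */
		return ret;

	/* ... commit resources, emit the request ... */

	/* step 2: enqueue + ring the doorbell; cannot fail for lack of space */
	ret = i915_guc_submit(req);

Splitting the fallible space check from the infallible enqueue is what lets guc_add_workqueue_item() become void: by the time it runs, CIRC_SPACE() has already been verified.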
567/* 604/*
@@ -572,7 +609,7 @@ int i915_guc_submit(struct i915_guc_client *client,
572 609
573/** 610/**
574 * gem_allocate_guc_obj() - Allocate gem object for GuC usage 611 * gem_allocate_guc_obj() - Allocate gem object for GuC usage
575 * @dev: drm device 612 * @dev_priv: driver private data structure
576 * @size: size of object 613 * @size: size of object
577 * 614 *
578 * This is a wrapper to create a gem obj. In order to use it inside GuC, the 615 * This is a wrapper to create a gem obj. In order to use it inside GuC, the
@@ -581,14 +618,13 @@ int i915_guc_submit(struct i915_guc_client *client,
581 * 618 *
582 * Return: A drm_i915_gem_object if successful, otherwise NULL. 619 * Return: A drm_i915_gem_object if successful, otherwise NULL.
583 */ 620 */
584static struct drm_i915_gem_object *gem_allocate_guc_obj(struct drm_device *dev, 621static struct drm_i915_gem_object *
585 u32 size) 622gem_allocate_guc_obj(struct drm_i915_private *dev_priv, u32 size)
586{ 623{
587 struct drm_i915_private *dev_priv = dev->dev_private;
588 struct drm_i915_gem_object *obj; 624 struct drm_i915_gem_object *obj;
589 625
590 obj = i915_gem_alloc_object(dev, size); 626 obj = i915_gem_object_create(dev_priv->dev, size);
591 if (!obj) 627 if (IS_ERR(obj))
592 return NULL; 628 return NULL;
593 629
594 if (i915_gem_object_get_pages(obj)) { 630 if (i915_gem_object_get_pages(obj)) {
@@ -623,10 +659,10 @@ static void gem_release_guc_obj(struct drm_i915_gem_object *obj)
623 drm_gem_object_unreference(&obj->base); 659 drm_gem_object_unreference(&obj->base);
624} 660}
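
gem_allocate_guc_obj() and gem_release_guc_obj() pair up in the obvious way; a minimal caller sketch, relying only on the behaviour shown in this diff (NULL on any allocation or pin failure):

	struct drm_i915_gem_object *obj;

	obj = gem_allocate_guc_obj(dev_priv, GUC_DB_SIZE + GUC_WQ_SIZE);
	if (!obj)
		return -ENOMEM;	/* create, get_pages or pin failed */

	/* ... use the object through its GGTT mapping ... */

	gem_release_guc_obj(obj);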
625 661
626static void guc_client_free(struct drm_device *dev, 662static void
627 struct i915_guc_client *client) 663guc_client_free(struct drm_i915_private *dev_priv,
664 struct i915_guc_client *client)
628{ 665{
629 struct drm_i915_private *dev_priv = dev->dev_private;
630 struct intel_guc *guc = &dev_priv->guc; 666 struct intel_guc *guc = &dev_priv->guc;
631 667
632 if (!client) 668 if (!client)
@@ -639,17 +675,10 @@ static void guc_client_free(struct drm_device *dev,
639 675
640 if (client->client_base) { 676 if (client->client_base) {
641 /* 677 /*
642 * If we got as far as setting up a doorbell, make sure 678 * If we got as far as setting up a doorbell, make sure we
643 * we shut it down before unmapping & deallocating the 679 * shut it down before unmapping & deallocating the memory.
644 * memory. So first disable the doorbell, then tell the
645 * GuC that we've finished with it, finally deallocate
646 * it in our bitmap
647 */ 680 */
648 if (client->doorbell_id != GUC_INVALID_DOORBELL_ID) { 681 guc_disable_doorbell(guc, client);
649 guc_disable_doorbell(guc, client);
650 host2guc_release_doorbell(guc, client);
651 release_doorbell(guc, client->doorbell_id);
652 }
653 682
654 kunmap(kmap_to_page(client->client_base)); 683 kunmap(kmap_to_page(client->client_base));
655 } 684 }
@@ -664,9 +693,51 @@ static void guc_client_free(struct drm_device *dev,
664 kfree(client); 693 kfree(client);
665} 694}
666 695
696/*
697 * Borrow the first client to set up & tear down every doorbell
698 * in turn, to ensure that all doorbell h/w is (re)initialised.
699 */
700static void guc_init_doorbell_hw(struct intel_guc *guc)
701{
702 struct drm_i915_private *dev_priv = guc_to_i915(guc);
703 struct i915_guc_client *client = guc->execbuf_client;
704 uint16_t db_id, i;
705 int err;
706
707 db_id = client->doorbell_id;
708
709 for (i = 0; i < GUC_MAX_DOORBELLS; ++i) {
710 i915_reg_t drbreg = GEN8_DRBREGL(i);
711 u32 value = I915_READ(drbreg);
712
713 err = guc_update_doorbell_id(guc, client, i);
714
715 /* Report update failure or unexpectedly active doorbell */
716 if (err || (i != db_id && (value & GUC_DOORBELL_ENABLED)))
717 DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) was 0x%x, err %d\n",
718 i, drbreg.reg, value, err);
719 }
720
721 /* Restore to original value */
722 err = guc_update_doorbell_id(guc, client, db_id);
723 if (err)
724 DRM_ERROR("Failed to restore doorbell to %d, err %d\n",
725 db_id, err);
726
727 for (i = 0; i < GUC_MAX_DOORBELLS; ++i) {
728 i915_reg_t drbreg = GEN8_DRBREGL(i);
729 u32 value = I915_READ(drbreg);
730
731 if (i != db_id && (value & GUC_DOORBELL_ENABLED))
732 DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) finally 0x%x\n",
733 i, drbreg.reg, value);
734
735 }
736}
737
667/** 738/**
668 * guc_client_alloc() - Allocate an i915_guc_client 739 * guc_client_alloc() - Allocate an i915_guc_client
669 * @dev: drm device 740 * @dev_priv: driver private data structure
670 * @priority: one of four priority levels: _CRITICAL, _HIGH, _NORMAL and _LOW 741 * @priority: one of four priority levels: _CRITICAL, _HIGH, _NORMAL and _LOW
671 * The kernel client that replaces ExecList submission is created with 742 * The kernel client that replaces ExecList submission is created with
672 * NORMAL priority. The priority of a scheduler client can be HIGH, 743 * NORMAL priority. The priority of a scheduler client can be HIGH,
@@ -676,14 +747,15 @@ static void guc_client_free(struct drm_device *dev,
676 * 747 *
677 * Return: An i915_guc_client object on success, else NULL. 748 * Return: An i915_guc_client object on success, else NULL.
678 */ 749 */
679static struct i915_guc_client *guc_client_alloc(struct drm_device *dev, 750static struct i915_guc_client *
680 uint32_t priority, 751guc_client_alloc(struct drm_i915_private *dev_priv,
681 struct intel_context *ctx) 752 uint32_t priority,
753 struct i915_gem_context *ctx)
682{ 754{
683 struct i915_guc_client *client; 755 struct i915_guc_client *client;
684 struct drm_i915_private *dev_priv = dev->dev_private;
685 struct intel_guc *guc = &dev_priv->guc; 756 struct intel_guc *guc = &dev_priv->guc;
686 struct drm_i915_gem_object *obj; 757 struct drm_i915_gem_object *obj;
758 uint16_t db_id;
687 759
688 client = kzalloc(sizeof(*client), GFP_KERNEL); 760 client = kzalloc(sizeof(*client), GFP_KERNEL);
689 if (!client) 761 if (!client)
@@ -702,7 +774,7 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
702 } 774 }
703 775
704	/* The first page is doorbell/proc_desc. The two pages that follow are the wq. */ 776	/* The first page is doorbell/proc_desc. The two pages that follow are the wq. */
705 obj = gem_allocate_guc_obj(dev, GUC_DB_SIZE + GUC_WQ_SIZE); 777 obj = gem_allocate_guc_obj(dev_priv, GUC_DB_SIZE + GUC_WQ_SIZE);
706 if (!obj) 778 if (!obj)
707 goto err; 779 goto err;
708 780
@@ -712,6 +784,11 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
712 client->wq_offset = GUC_DB_SIZE; 784 client->wq_offset = GUC_DB_SIZE;
713 client->wq_size = GUC_WQ_SIZE; 785 client->wq_size = GUC_WQ_SIZE;
714 786
787 db_id = select_doorbell_register(guc, client->priority);
788 if (db_id == GUC_INVALID_DOORBELL_ID)
789 /* XXX: evict a doorbell instead? */
790 goto err;
791
715 client->doorbell_offset = select_doorbell_cacheline(guc); 792 client->doorbell_offset = select_doorbell_cacheline(guc);
716 793
717 /* 794 /*
@@ -724,29 +801,22 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
724 else 801 else
725 client->proc_desc_offset = (GUC_DB_SIZE / 2); 802 client->proc_desc_offset = (GUC_DB_SIZE / 2);
726 803
727 client->doorbell_id = assign_doorbell(guc, client->priority);
728 if (client->doorbell_id == GUC_INVALID_DOORBELL_ID)
729 /* XXX: evict a doorbell instead */
730 goto err;
731
732 guc_init_proc_desc(guc, client); 804 guc_init_proc_desc(guc, client);
733 guc_init_ctx_desc(guc, client); 805 guc_init_ctx_desc(guc, client);
734 guc_init_doorbell(guc, client); 806 if (guc_init_doorbell(guc, client, db_id))
735
736 /* XXX: Any cache flushes needed? General domain mgmt calls? */
737
738 if (host2guc_allocate_doorbell(guc, client))
739 goto err; 807 goto err;
740 808
741 DRM_DEBUG_DRIVER("new priority %u client %p: ctx_index %u db_id %u\n", 809 DRM_DEBUG_DRIVER("new priority %u client %p: ctx_index %u\n",
742 priority, client, client->ctx_index, client->doorbell_id); 810 priority, client, client->ctx_index);
811 DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%x\n",
812 client->doorbell_id, client->doorbell_offset);
743 813
744 return client; 814 return client;
745 815
746err: 816err:
747 DRM_ERROR("FAILED to create priority %u GuC client!\n", priority); 817 DRM_ERROR("FAILED to create priority %u GuC client!\n", priority);
748 818
749 guc_client_free(dev, client); 819 guc_client_free(dev_priv, client);
750 return NULL; 820 return NULL;
751} 821}
752 822
@@ -771,7 +841,7 @@ static void guc_create_log(struct intel_guc *guc)
771 841
772 obj = guc->log_obj; 842 obj = guc->log_obj;
773 if (!obj) { 843 if (!obj) {
774 obj = gem_allocate_guc_obj(dev_priv->dev, size); 844 obj = gem_allocate_guc_obj(dev_priv, size);
775 if (!obj) { 845 if (!obj) {
776 /* logging will be off */ 846 /* logging will be off */
777 i915.guc_log_level = -1; 847 i915.guc_log_level = -1;
@@ -831,7 +901,7 @@ static void guc_create_ads(struct intel_guc *guc)
831 901
832 obj = guc->ads_obj; 902 obj = guc->ads_obj;
833 if (!obj) { 903 if (!obj) {
834 obj = gem_allocate_guc_obj(dev_priv->dev, PAGE_ALIGN(size)); 904 obj = gem_allocate_guc_obj(dev_priv, PAGE_ALIGN(size));
835 if (!obj) 905 if (!obj)
836 return; 906 return;
837 907
@@ -885,66 +955,65 @@ static void guc_create_ads(struct intel_guc *guc)
885 * Set up the memory resources to be shared with the GuC. At this point, 955 * Set up the memory resources to be shared with the GuC. At this point,
886 * we require just one object that can be mapped through the GGTT. 956 * we require just one object that can be mapped through the GGTT.
887 */ 957 */
888int i915_guc_submission_init(struct drm_device *dev) 958int i915_guc_submission_init(struct drm_i915_private *dev_priv)
889{ 959{
890 struct drm_i915_private *dev_priv = dev->dev_private;
891 const size_t ctxsize = sizeof(struct guc_context_desc); 960 const size_t ctxsize = sizeof(struct guc_context_desc);
892 const size_t poolsize = GUC_MAX_GPU_CONTEXTS * ctxsize; 961 const size_t poolsize = GUC_MAX_GPU_CONTEXTS * ctxsize;
893 const size_t gemsize = round_up(poolsize, PAGE_SIZE); 962 const size_t gemsize = round_up(poolsize, PAGE_SIZE);
894 struct intel_guc *guc = &dev_priv->guc; 963 struct intel_guc *guc = &dev_priv->guc;
895 964
965 /* Wipe bitmap & delete client in case of reinitialisation */
966 bitmap_clear(guc->doorbell_bitmap, 0, GUC_MAX_DOORBELLS);
967 i915_guc_submission_disable(dev_priv);
968
896 if (!i915.enable_guc_submission) 969 if (!i915.enable_guc_submission)
897 return 0; /* not enabled */ 970 return 0; /* not enabled */
898 971
899 if (guc->ctx_pool_obj) 972 if (guc->ctx_pool_obj)
900 return 0; /* already allocated */ 973 return 0; /* already allocated */
901 974
902 guc->ctx_pool_obj = gem_allocate_guc_obj(dev_priv->dev, gemsize); 975 guc->ctx_pool_obj = gem_allocate_guc_obj(dev_priv, gemsize);
903 if (!guc->ctx_pool_obj) 976 if (!guc->ctx_pool_obj)
904 return -ENOMEM; 977 return -ENOMEM;
905 978
906 ida_init(&guc->ctx_ids); 979 ida_init(&guc->ctx_ids);
907
908 guc_create_log(guc); 980 guc_create_log(guc);
909
910 guc_create_ads(guc); 981 guc_create_ads(guc);
911 982
912 return 0; 983 return 0;
913} 984}
914 985
915int i915_guc_submission_enable(struct drm_device *dev) 986int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
916{ 987{
917 struct drm_i915_private *dev_priv = dev->dev_private;
918 struct intel_guc *guc = &dev_priv->guc; 988 struct intel_guc *guc = &dev_priv->guc;
919 struct intel_context *ctx = dev_priv->kernel_context;
920 struct i915_guc_client *client; 989 struct i915_guc_client *client;
921 990
922 /* client for execbuf submission */ 991 /* client for execbuf submission */
923 client = guc_client_alloc(dev, GUC_CTX_PRIORITY_KMD_NORMAL, ctx); 992 client = guc_client_alloc(dev_priv,
993 GUC_CTX_PRIORITY_KMD_NORMAL,
994 dev_priv->kernel_context);
924 if (!client) { 995 if (!client) {
925 DRM_ERROR("Failed to create execbuf guc_client\n"); 996 DRM_ERROR("Failed to create execbuf guc_client\n");
926 return -ENOMEM; 997 return -ENOMEM;
927 } 998 }
928 999
929 guc->execbuf_client = client; 1000 guc->execbuf_client = client;
930
931 host2guc_sample_forcewake(guc, client); 1001 host2guc_sample_forcewake(guc, client);
1002 guc_init_doorbell_hw(guc);
932 1003
933 return 0; 1004 return 0;
934} 1005}
935 1006
936void i915_guc_submission_disable(struct drm_device *dev) 1007void i915_guc_submission_disable(struct drm_i915_private *dev_priv)
937{ 1008{
938 struct drm_i915_private *dev_priv = dev->dev_private;
939 struct intel_guc *guc = &dev_priv->guc; 1009 struct intel_guc *guc = &dev_priv->guc;
940 1010
941 guc_client_free(dev, guc->execbuf_client); 1011 guc_client_free(dev_priv, guc->execbuf_client);
942 guc->execbuf_client = NULL; 1012 guc->execbuf_client = NULL;
943} 1013}
944 1014
945void i915_guc_submission_fini(struct drm_device *dev) 1015void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
946{ 1016{
947 struct drm_i915_private *dev_priv = dev->dev_private;
948 struct intel_guc *guc = &dev_priv->guc; 1017 struct intel_guc *guc = &dev_priv->guc;
949 1018
950 gem_release_guc_obj(dev_priv->guc.ads_obj); 1019 gem_release_guc_obj(dev_priv->guc.ads_obj);
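
Taken together, these entry points form a simple lifecycle. An assumed ordering, sketched from the functions above (not a verbatim caller; i915_guc_submission_fini() continues beyond the hunk shown):

	/* driver load, or GuC firmware (re)load */
	err = i915_guc_submission_init(dev_priv);	/* shared GuC objects */
	if (err == 0)
		err = i915_guc_submission_enable(dev_priv);	/* execbuf client + doorbell h/w */

	/* driver unload, in reverse */
	i915_guc_submission_disable(dev_priv);	/* frees the execbuf client */
	i915_guc_submission_fini(dev_priv);	/* releases the shared objects */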
@@ -967,10 +1036,10 @@ int intel_guc_suspend(struct drm_device *dev)
967{ 1036{
968 struct drm_i915_private *dev_priv = dev->dev_private; 1037 struct drm_i915_private *dev_priv = dev->dev_private;
969 struct intel_guc *guc = &dev_priv->guc; 1038 struct intel_guc *guc = &dev_priv->guc;
970 struct intel_context *ctx; 1039 struct i915_gem_context *ctx;
971 u32 data[3]; 1040 u32 data[3];
972 1041
973 if (!i915.enable_guc_submission) 1042 if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
974 return 0; 1043 return 0;
975 1044
976 ctx = dev_priv->kernel_context; 1045 ctx = dev_priv->kernel_context;
@@ -993,10 +1062,10 @@ int intel_guc_resume(struct drm_device *dev)
993{ 1062{
994 struct drm_i915_private *dev_priv = dev->dev_private; 1063 struct drm_i915_private *dev_priv = dev->dev_private;
995 struct intel_guc *guc = &dev_priv->guc; 1064 struct intel_guc *guc = &dev_priv->guc;
996 struct intel_context *ctx; 1065 struct i915_gem_context *ctx;
997 u32 data[3]; 1066 u32 data[3];
998 1067
999 if (!i915.enable_guc_submission) 1068 if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
1000 return 0; 1069 return 0;
1001 1070
1002 ctx = dev_priv->kernel_context; 1071 ctx = dev_priv->kernel_context;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 2f6fd33c07ba..4378a659d962 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -336,9 +336,8 @@ void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
336 __gen6_disable_pm_irq(dev_priv, mask); 336 __gen6_disable_pm_irq(dev_priv, mask);
337} 337}
338 338
339void gen6_reset_rps_interrupts(struct drm_device *dev) 339void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
340{ 340{
341 struct drm_i915_private *dev_priv = dev->dev_private;
342 i915_reg_t reg = gen6_pm_iir(dev_priv); 341 i915_reg_t reg = gen6_pm_iir(dev_priv);
343 342
344 spin_lock_irq(&dev_priv->irq_lock); 343 spin_lock_irq(&dev_priv->irq_lock);
@@ -349,10 +348,8 @@ void gen6_reset_rps_interrupts(struct drm_device *dev)
349 spin_unlock_irq(&dev_priv->irq_lock); 348 spin_unlock_irq(&dev_priv->irq_lock);
350} 349}
351 350
352void gen6_enable_rps_interrupts(struct drm_device *dev) 351void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
353{ 352{
354 struct drm_i915_private *dev_priv = dev->dev_private;
355
356 spin_lock_irq(&dev_priv->irq_lock); 353 spin_lock_irq(&dev_priv->irq_lock);
357 354
358 WARN_ON(dev_priv->rps.pm_iir); 355 WARN_ON(dev_priv->rps.pm_iir);
@@ -367,25 +364,11 @@ void gen6_enable_rps_interrupts(struct drm_device *dev)
367 364
368u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask) 365u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
369{ 366{
370 /* 367 return (mask & ~dev_priv->rps.pm_intr_keep);
371 * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer
372 * if GEN6_PM_UP_EI_EXPIRED is masked.
373 *
374 * TODO: verify if this can be reproduced on VLV,CHV.
375 */
376 if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
377 mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
378
379 if (INTEL_INFO(dev_priv)->gen >= 8)
380 mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
381
382 return mask;
383} 368}
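
The per-platform special cases deleted here are evidently precomputed into dev_priv->rps.pm_intr_keep; where that happens is not part of this diff, but a sketch consistent with the removed logic would be:

	/* PM interrupts the driver must always leave unmasked; set once at init */
	dev_priv->rps.pm_intr_keep = 0;
	if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
		dev_priv->rps.pm_intr_keep |= GEN6_PM_RP_UP_EI_EXPIRED;
	if (INTEL_GEN(dev_priv) >= 8)
		dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;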
384 369
385void gen6_disable_rps_interrupts(struct drm_device *dev) 370void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
386{ 371{
387 struct drm_i915_private *dev_priv = dev->dev_private;
388
389 spin_lock_irq(&dev_priv->irq_lock); 372 spin_lock_irq(&dev_priv->irq_lock);
390 dev_priv->rps.interrupts_enabled = false; 373 dev_priv->rps.interrupts_enabled = false;
391 spin_unlock_irq(&dev_priv->irq_lock); 374 spin_unlock_irq(&dev_priv->irq_lock);
@@ -402,7 +385,7 @@ void gen6_disable_rps_interrupts(struct drm_device *dev)
402 385
403 spin_unlock_irq(&dev_priv->irq_lock); 386 spin_unlock_irq(&dev_priv->irq_lock);
404 387
405 synchronize_irq(dev->irq); 388 synchronize_irq(dev_priv->dev->irq);
406} 389}
407 390
408/** 391/**
@@ -605,19 +588,17 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
605 588
606/** 589/**
607 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 590 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
608 * @dev: drm device 591 * @dev_priv: i915 device private
609 */ 592 */
610static void i915_enable_asle_pipestat(struct drm_device *dev) 593static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
611{ 594{
612 struct drm_i915_private *dev_priv = dev->dev_private; 595 if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
613
614 if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
615 return; 596 return;
616 597
617 spin_lock_irq(&dev_priv->irq_lock); 598 spin_lock_irq(&dev_priv->irq_lock);
618 599
619 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); 600 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
620 if (INTEL_INFO(dev)->gen >= 4) 601 if (INTEL_GEN(dev_priv) >= 4)
621 i915_enable_pipestat(dev_priv, PIPE_A, 602 i915_enable_pipestat(dev_priv, PIPE_A,
622 PIPE_LEGACY_BLC_EVENT_STATUS); 603 PIPE_LEGACY_BLC_EVENT_STATUS);
623 604
@@ -750,7 +731,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
750 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 731 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
751 vtotal /= 2; 732 vtotal /= 2;
752 733
753 if (IS_GEN2(dev)) 734 if (IS_GEN2(dev_priv))
754 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; 735 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
755 else 736 else
756 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; 737 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
@@ -767,7 +748,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
767 * problem. We may need to extend this to include other platforms, 748 * problem. We may need to extend this to include other platforms,
768 * but so far testing only shows the problem on HSW. 749 * but so far testing only shows the problem on HSW.
769 */ 750 */
770 if (HAS_DDI(dev) && !position) { 751 if (HAS_DDI(dev_priv) && !position) {
771 int i, temp; 752 int i, temp;
772 753
773 for (i = 0; i < 100; i++) { 754 for (i = 0; i < 100; i++) {
@@ -835,7 +816,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
835 if (stime) 816 if (stime)
836 *stime = ktime_get(); 817 *stime = ktime_get();
837 818
838 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 819 if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
839 /* No obvious pixelcount register. Only query vertical 820 /* No obvious pixelcount register. Only query vertical
840 * scanout position from Display scan line register. 821 * scanout position from Display scan line register.
841 */ 822 */
@@ -897,7 +878,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
897 else 878 else
898 position += vtotal - vbl_end; 879 position += vtotal - vbl_end;
899 880
900 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 881 if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
901 *vpos = position; 882 *vpos = position;
902 *hpos = 0; 883 *hpos = 0;
903 } else { 884 } else {
@@ -955,9 +936,8 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
955 &crtc->hwmode); 936 &crtc->hwmode);
956} 937}
957 938
958static void ironlake_rps_change_irq_handler(struct drm_device *dev) 939static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
959{ 940{
960 struct drm_i915_private *dev_priv = dev->dev_private;
961 u32 busy_up, busy_down, max_avg, min_avg; 941 u32 busy_up, busy_down, max_avg, min_avg;
962 u8 new_delay; 942 u8 new_delay;
963 943
@@ -986,7 +966,7 @@ static void ironlake_rps_change_irq_handler(struct drm_device *dev)
986 new_delay = dev_priv->ips.min_delay; 966 new_delay = dev_priv->ips.min_delay;
987 } 967 }
988 968
989 if (ironlake_set_drps(dev, new_delay)) 969 if (ironlake_set_drps(dev_priv, new_delay))
990 dev_priv->ips.cur_delay = new_delay; 970 dev_priv->ips.cur_delay = new_delay;
991 971
992 spin_unlock(&mchdev_lock); 972 spin_unlock(&mchdev_lock);
@@ -1175,7 +1155,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
1175 new_delay += adj; 1155 new_delay += adj;
1176 new_delay = clamp_t(int, new_delay, min, max); 1156 new_delay = clamp_t(int, new_delay, min, max);
1177 1157
1178 intel_set_rps(dev_priv->dev, new_delay); 1158 intel_set_rps(dev_priv, new_delay);
1179 1159
1180 mutex_unlock(&dev_priv->rps.hw_lock); 1160 mutex_unlock(&dev_priv->rps.hw_lock);
1181out: 1161out:
@@ -1506,27 +1486,23 @@ static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
1506 1486
1507} 1487}
1508 1488
1509static void gmbus_irq_handler(struct drm_device *dev) 1489static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
1510{ 1490{
1511 struct drm_i915_private *dev_priv = dev->dev_private;
1512
1513 wake_up_all(&dev_priv->gmbus_wait_queue); 1491 wake_up_all(&dev_priv->gmbus_wait_queue);
1514} 1492}
1515 1493
1516static void dp_aux_irq_handler(struct drm_device *dev) 1494static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
1517{ 1495{
1518 struct drm_i915_private *dev_priv = dev->dev_private;
1519
1520 wake_up_all(&dev_priv->gmbus_wait_queue); 1496 wake_up_all(&dev_priv->gmbus_wait_queue);
1521} 1497}
1522 1498
1523#if defined(CONFIG_DEBUG_FS) 1499#if defined(CONFIG_DEBUG_FS)
1524static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, 1500static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1501 enum pipe pipe,
1525 uint32_t crc0, uint32_t crc1, 1502 uint32_t crc0, uint32_t crc1,
1526 uint32_t crc2, uint32_t crc3, 1503 uint32_t crc2, uint32_t crc3,
1527 uint32_t crc4) 1504 uint32_t crc4)
1528{ 1505{
1529 struct drm_i915_private *dev_priv = dev->dev_private;
1530 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 1506 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1531 struct intel_pipe_crc_entry *entry; 1507 struct intel_pipe_crc_entry *entry;
1532 int head, tail; 1508 int head, tail;
@@ -1550,7 +1526,8 @@ static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1550 1526
1551 entry = &pipe_crc->entries[head]; 1527 entry = &pipe_crc->entries[head];
1552 1528
1553 entry->frame = dev->driver->get_vblank_counter(dev, pipe); 1529 entry->frame = dev_priv->dev->driver->get_vblank_counter(dev_priv->dev,
1530 pipe);
1554 entry->crc[0] = crc0; 1531 entry->crc[0] = crc0;
1555 entry->crc[1] = crc1; 1532 entry->crc[1] = crc1;
1556 entry->crc[2] = crc2; 1533 entry->crc[2] = crc2;
@@ -1566,27 +1543,26 @@ static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1566} 1543}
1567#else 1544#else
1568static inline void 1545static inline void
1569display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, 1546display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1547 enum pipe pipe,
1570 uint32_t crc0, uint32_t crc1, 1548 uint32_t crc0, uint32_t crc1,
1571 uint32_t crc2, uint32_t crc3, 1549 uint32_t crc2, uint32_t crc3,
1572 uint32_t crc4) {} 1550 uint32_t crc4) {}
1573#endif 1551#endif
1574 1552
1575 1553
1576static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) 1554static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1555 enum pipe pipe)
1577{ 1556{
1578 struct drm_i915_private *dev_priv = dev->dev_private; 1557 display_pipe_crc_irq_handler(dev_priv, pipe,
1579
1580 display_pipe_crc_irq_handler(dev, pipe,
1581 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1558 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1582 0, 0, 0, 0); 1559 0, 0, 0, 0);
1583} 1560}
1584 1561
1585static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) 1562static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1563 enum pipe pipe)
1586{ 1564{
1587 struct drm_i915_private *dev_priv = dev->dev_private; 1565 display_pipe_crc_irq_handler(dev_priv, pipe,
1588
1589 display_pipe_crc_irq_handler(dev, pipe,
1590 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1566 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1591 I915_READ(PIPE_CRC_RES_2_IVB(pipe)), 1567 I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1592 I915_READ(PIPE_CRC_RES_3_IVB(pipe)), 1568 I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
@@ -1594,22 +1570,22 @@ static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1594 I915_READ(PIPE_CRC_RES_5_IVB(pipe))); 1570 I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1595} 1571}
1596 1572
1597static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) 1573static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1574 enum pipe pipe)
1598{ 1575{
1599 struct drm_i915_private *dev_priv = dev->dev_private;
1600 uint32_t res1, res2; 1576 uint32_t res1, res2;
1601 1577
1602 if (INTEL_INFO(dev)->gen >= 3) 1578 if (INTEL_GEN(dev_priv) >= 3)
1603 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); 1579 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1604 else 1580 else
1605 res1 = 0; 1581 res1 = 0;
1606 1582
1607 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) 1583 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1608 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); 1584 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1609 else 1585 else
1610 res2 = 0; 1586 res2 = 0;
1611 1587
1612 display_pipe_crc_irq_handler(dev, pipe, 1588 display_pipe_crc_irq_handler(dev_priv, pipe,
1613 I915_READ(PIPE_CRC_RES_RED(pipe)), 1589 I915_READ(PIPE_CRC_RES_RED(pipe)),
1614 I915_READ(PIPE_CRC_RES_GREEN(pipe)), 1590 I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1615 I915_READ(PIPE_CRC_RES_BLUE(pipe)), 1591 I915_READ(PIPE_CRC_RES_BLUE(pipe)),
@@ -1643,18 +1619,21 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1643 } 1619 }
1644} 1620}
1645 1621
1646static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe) 1622static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
1623 enum pipe pipe)
1647{ 1624{
1648 if (!drm_handle_vblank(dev, pipe)) 1625 bool ret;
1649 return false;
1650 1626
1651 return true; 1627 ret = drm_handle_vblank(dev_priv->dev, pipe);
1628 if (ret)
1629 intel_finish_page_flip_mmio(dev_priv, pipe);
1630
1631 return ret;
1652} 1632}
1653 1633
1654static void valleyview_pipestat_irq_ack(struct drm_device *dev, u32 iir, 1634static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
1655 u32 pipe_stats[I915_MAX_PIPES]) 1635 u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1656{ 1636{
1657 struct drm_i915_private *dev_priv = dev->dev_private;
1658 int pipe; 1637 int pipe;
1659 1638
1660 spin_lock(&dev_priv->irq_lock); 1639 spin_lock(&dev_priv->irq_lock);
@@ -1710,31 +1689,28 @@ static void valleyview_pipestat_irq_ack(struct drm_device *dev, u32 iir,
1710 spin_unlock(&dev_priv->irq_lock); 1689 spin_unlock(&dev_priv->irq_lock);
1711} 1690}
1712 1691
1713static void valleyview_pipestat_irq_handler(struct drm_device *dev, 1692static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1714 u32 pipe_stats[I915_MAX_PIPES]) 1693 u32 pipe_stats[I915_MAX_PIPES])
1715{ 1694{
1716 struct drm_i915_private *dev_priv = to_i915(dev);
1717 enum pipe pipe; 1695 enum pipe pipe;
1718 1696
1719 for_each_pipe(dev_priv, pipe) { 1697 for_each_pipe(dev_priv, pipe) {
1720 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 1698 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
1721 intel_pipe_handle_vblank(dev, pipe)) 1699 intel_pipe_handle_vblank(dev_priv, pipe))
1722 intel_check_page_flip(dev, pipe); 1700 intel_check_page_flip(dev_priv, pipe);
1723 1701
1724 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) { 1702 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
1725 intel_prepare_page_flip(dev, pipe); 1703 intel_finish_page_flip_cs(dev_priv, pipe);
1726 intel_finish_page_flip(dev, pipe);
1727 }
1728 1704
1729 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1705 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1730 i9xx_pipe_crc_irq_handler(dev, pipe); 1706 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1731 1707
1732 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1708 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1733 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1709 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1734 } 1710 }
1735 1711
1736 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1712 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1737 gmbus_irq_handler(dev); 1713 gmbus_irq_handler(dev_priv);
1738} 1714}
1739 1715
1740static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) 1716static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
@@ -1747,12 +1723,13 @@ static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
1747 return hotplug_status; 1723 return hotplug_status;
1748} 1724}
1749 1725
1750static void i9xx_hpd_irq_handler(struct drm_device *dev, 1726static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1751 u32 hotplug_status) 1727 u32 hotplug_status)
1752{ 1728{
1753 u32 pin_mask = 0, long_mask = 0; 1729 u32 pin_mask = 0, long_mask = 0;
1754 1730
1755 if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { 1731 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
1732 IS_CHERRYVIEW(dev_priv)) {
1756 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; 1733 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1757 1734
1758 if (hotplug_trigger) { 1735 if (hotplug_trigger) {
@@ -1760,11 +1737,11 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev,
1760 hotplug_trigger, hpd_status_g4x, 1737 hotplug_trigger, hpd_status_g4x,
1761 i9xx_port_hotplug_long_detect); 1738 i9xx_port_hotplug_long_detect);
1762 1739
1763 intel_hpd_irq_handler(dev, pin_mask, long_mask); 1740 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1764 } 1741 }
1765 1742
1766 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 1743 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1767 dp_aux_irq_handler(dev); 1744 dp_aux_irq_handler(dev_priv);
1768 } else { 1745 } else {
1769 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 1746 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1770 1747
@@ -1772,7 +1749,7 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev,
1772 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1749 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1773 hotplug_trigger, hpd_status_i915, 1750 hotplug_trigger, hpd_status_i915,
1774 i9xx_port_hotplug_long_detect); 1751 i9xx_port_hotplug_long_detect);
1775 intel_hpd_irq_handler(dev, pin_mask, long_mask); 1752 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1776 } 1753 }
1777 } 1754 }
1778} 1755}
@@ -1831,7 +1808,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1831 1808
1832 /* Call regardless, as some status bits might not be 1809 /* Call regardless, as some status bits might not be
1833 * signalled in iir */ 1810 * signalled in iir */
1834 valleyview_pipestat_irq_ack(dev, iir, pipe_stats); 1811 valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1835 1812
1836 /* 1813 /*
1837 * VLV_IIR is single buffered, and reflects the level 1814 * VLV_IIR is single buffered, and reflects the level
@@ -1850,9 +1827,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1850 gen6_rps_irq_handler(dev_priv, pm_iir); 1827 gen6_rps_irq_handler(dev_priv, pm_iir);
1851 1828
1852 if (hotplug_status) 1829 if (hotplug_status)
1853 i9xx_hpd_irq_handler(dev, hotplug_status); 1830 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1854 1831
1855 valleyview_pipestat_irq_handler(dev, pipe_stats); 1832 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1856 } while (0); 1833 } while (0);
1857 1834
1858 enable_rpm_wakeref_asserts(dev_priv); 1835 enable_rpm_wakeref_asserts(dev_priv);
@@ -1911,7 +1888,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1911 1888
1912 /* Call regardless, as some status bits might not be 1889 /* Call regardless, as some status bits might not be
1913 * signalled in iir */ 1890 * signalled in iir */
1914 valleyview_pipestat_irq_ack(dev, iir, pipe_stats); 1891 valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1915 1892
1916 /* 1893 /*
1917 * VLV_IIR is single buffered, and reflects the level 1894 * VLV_IIR is single buffered, and reflects the level
@@ -1927,9 +1904,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1927 gen8_gt_irq_handler(dev_priv, gt_iir); 1904 gen8_gt_irq_handler(dev_priv, gt_iir);
1928 1905
1929 if (hotplug_status) 1906 if (hotplug_status)
1930 i9xx_hpd_irq_handler(dev, hotplug_status); 1907 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1931 1908
1932 valleyview_pipestat_irq_handler(dev, pipe_stats); 1909 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1933 } while (0); 1910 } while (0);
1934 1911
1935 enable_rpm_wakeref_asserts(dev_priv); 1912 enable_rpm_wakeref_asserts(dev_priv);
@@ -1937,10 +1914,10 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1937 return ret; 1914 return ret;
1938} 1915}
1939 1916
1940static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, 1917static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1918 u32 hotplug_trigger,
1941 const u32 hpd[HPD_NUM_PINS]) 1919 const u32 hpd[HPD_NUM_PINS])
1942{ 1920{
1943 struct drm_i915_private *dev_priv = to_i915(dev);
1944 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 1921 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1945 1922
1946 /* 1923 /*
@@ -1966,16 +1943,15 @@ static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
1966 dig_hotplug_reg, hpd, 1943 dig_hotplug_reg, hpd,
1967 pch_port_hotplug_long_detect); 1944 pch_port_hotplug_long_detect);
1968 1945
1969 intel_hpd_irq_handler(dev, pin_mask, long_mask); 1946 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1970} 1947}
1971 1948
1972static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) 1949static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1973{ 1950{
1974 struct drm_i915_private *dev_priv = dev->dev_private;
1975 int pipe; 1951 int pipe;
1976 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 1952 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1977 1953
1978 ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx); 1954 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
1979 1955
1980 if (pch_iir & SDE_AUDIO_POWER_MASK) { 1956 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1981 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 1957 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -1985,10 +1961,10 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1985 } 1961 }
1986 1962
1987 if (pch_iir & SDE_AUX_MASK) 1963 if (pch_iir & SDE_AUX_MASK)
1988 dp_aux_irq_handler(dev); 1964 dp_aux_irq_handler(dev_priv);
1989 1965
1990 if (pch_iir & SDE_GMBUS) 1966 if (pch_iir & SDE_GMBUS)
1991 gmbus_irq_handler(dev); 1967 gmbus_irq_handler(dev_priv);
1992 1968
1993 if (pch_iir & SDE_AUDIO_HDCP_MASK) 1969 if (pch_iir & SDE_AUDIO_HDCP_MASK)
1994 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 1970 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
@@ -2018,9 +1994,8 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
2018 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 1994 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
2019} 1995}
2020 1996
2021static void ivb_err_int_handler(struct drm_device *dev) 1997static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
2022{ 1998{
2023 struct drm_i915_private *dev_priv = dev->dev_private;
2024 u32 err_int = I915_READ(GEN7_ERR_INT); 1999 u32 err_int = I915_READ(GEN7_ERR_INT);
2025 enum pipe pipe; 2000 enum pipe pipe;
2026 2001
@@ -2032,19 +2007,18 @@ static void ivb_err_int_handler(struct drm_device *dev)
2032 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2007 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2033 2008
2034 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 2009 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
2035 if (IS_IVYBRIDGE(dev)) 2010 if (IS_IVYBRIDGE(dev_priv))
2036 ivb_pipe_crc_irq_handler(dev, pipe); 2011 ivb_pipe_crc_irq_handler(dev_priv, pipe);
2037 else 2012 else
2038 hsw_pipe_crc_irq_handler(dev, pipe); 2013 hsw_pipe_crc_irq_handler(dev_priv, pipe);
2039 } 2014 }
2040 } 2015 }
2041 2016
2042 I915_WRITE(GEN7_ERR_INT, err_int); 2017 I915_WRITE(GEN7_ERR_INT, err_int);
2043} 2018}
2044 2019
2045static void cpt_serr_int_handler(struct drm_device *dev) 2020static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
2046{ 2021{
2047 struct drm_i915_private *dev_priv = dev->dev_private;
2048 u32 serr_int = I915_READ(SERR_INT); 2022 u32 serr_int = I915_READ(SERR_INT);
2049 2023
2050 if (serr_int & SERR_INT_POISON) 2024 if (serr_int & SERR_INT_POISON)
@@ -2062,13 +2036,12 @@ static void cpt_serr_int_handler(struct drm_device *dev)
2062 I915_WRITE(SERR_INT, serr_int); 2036 I915_WRITE(SERR_INT, serr_int);
2063} 2037}
2064 2038
2065static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 2039static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2066{ 2040{
2067 struct drm_i915_private *dev_priv = dev->dev_private;
2068 int pipe; 2041 int pipe;
2069 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 2042 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2070 2043
2071 ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt); 2044 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
2072 2045
2073 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2046 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2074 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 2047 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -2078,10 +2051,10 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
2078 } 2051 }
2079 2052
2080 if (pch_iir & SDE_AUX_MASK_CPT) 2053 if (pch_iir & SDE_AUX_MASK_CPT)
2081 dp_aux_irq_handler(dev); 2054 dp_aux_irq_handler(dev_priv);
2082 2055
2083 if (pch_iir & SDE_GMBUS_CPT) 2056 if (pch_iir & SDE_GMBUS_CPT)
2084 gmbus_irq_handler(dev); 2057 gmbus_irq_handler(dev_priv);
2085 2058
2086 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 2059 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2087 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 2060 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
@@ -2096,12 +2069,11 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
2096 I915_READ(FDI_RX_IIR(pipe))); 2069 I915_READ(FDI_RX_IIR(pipe)));
2097 2070
2098 if (pch_iir & SDE_ERROR_CPT) 2071 if (pch_iir & SDE_ERROR_CPT)
2099 cpt_serr_int_handler(dev); 2072 cpt_serr_int_handler(dev_priv);
2100} 2073}
2101 2074
2102static void spt_irq_handler(struct drm_device *dev, u32 pch_iir) 2075static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2103{ 2076{
2104 struct drm_i915_private *dev_priv = dev->dev_private;
2105 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & 2077 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
2106 ~SDE_PORTE_HOTPLUG_SPT; 2078 ~SDE_PORTE_HOTPLUG_SPT;
2107 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; 2079 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
@@ -2130,16 +2102,16 @@ static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
2130 } 2102 }
2131 2103
2132 if (pin_mask) 2104 if (pin_mask)
2133 intel_hpd_irq_handler(dev, pin_mask, long_mask); 2105 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2134 2106
2135 if (pch_iir & SDE_GMBUS_CPT) 2107 if (pch_iir & SDE_GMBUS_CPT)
2136 gmbus_irq_handler(dev); 2108 gmbus_irq_handler(dev_priv);
2137} 2109}
2138 2110
2139static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, 2111static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
2112 u32 hotplug_trigger,
2140 const u32 hpd[HPD_NUM_PINS]) 2113 const u32 hpd[HPD_NUM_PINS])
2141{ 2114{
2142 struct drm_i915_private *dev_priv = to_i915(dev);
2143 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2115 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2144 2116
2145 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 2117 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
@@ -2149,97 +2121,93 @@ static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
2149 dig_hotplug_reg, hpd, 2121 dig_hotplug_reg, hpd,
2150 ilk_port_hotplug_long_detect); 2122 ilk_port_hotplug_long_detect);
2151 2123
2152 intel_hpd_irq_handler(dev, pin_mask, long_mask); 2124 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2153} 2125}
2154 2126
2155static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) 2127static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
2128 u32 de_iir)
2156{ 2129{
2157 struct drm_i915_private *dev_priv = dev->dev_private;
2158 enum pipe pipe; 2130 enum pipe pipe;
2159 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; 2131 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2160 2132
2161 if (hotplug_trigger) 2133 if (hotplug_trigger)
2162 ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk); 2134 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
2163 2135
2164 if (de_iir & DE_AUX_CHANNEL_A) 2136 if (de_iir & DE_AUX_CHANNEL_A)
2165 dp_aux_irq_handler(dev); 2137 dp_aux_irq_handler(dev_priv);
2166 2138
2167 if (de_iir & DE_GSE) 2139 if (de_iir & DE_GSE)
2168 intel_opregion_asle_intr(dev); 2140 intel_opregion_asle_intr(dev_priv);
2169 2141
2170 if (de_iir & DE_POISON) 2142 if (de_iir & DE_POISON)
2171 DRM_ERROR("Poison interrupt\n"); 2143 DRM_ERROR("Poison interrupt\n");
2172 2144
2173 for_each_pipe(dev_priv, pipe) { 2145 for_each_pipe(dev_priv, pipe) {
2174 if (de_iir & DE_PIPE_VBLANK(pipe) && 2146 if (de_iir & DE_PIPE_VBLANK(pipe) &&
2175 intel_pipe_handle_vblank(dev, pipe)) 2147 intel_pipe_handle_vblank(dev_priv, pipe))
2176 intel_check_page_flip(dev, pipe); 2148 intel_check_page_flip(dev_priv, pipe);
2177 2149
2178 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2150 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2179 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2151 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2180 2152
2181 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2153 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2182 i9xx_pipe_crc_irq_handler(dev, pipe); 2154 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2183 2155
2184 /* plane/pipes map 1:1 on ilk+ */ 2156 /* plane/pipes map 1:1 on ilk+ */
2185 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) { 2157 if (de_iir & DE_PLANE_FLIP_DONE(pipe))
2186 intel_prepare_page_flip(dev, pipe); 2158 intel_finish_page_flip_cs(dev_priv, pipe);
2187 intel_finish_page_flip_plane(dev, pipe);
2188 }
2189 } 2159 }
2190 2160
2191 /* check event from PCH */ 2161 /* check event from PCH */
2192 if (de_iir & DE_PCH_EVENT) { 2162 if (de_iir & DE_PCH_EVENT) {
2193 u32 pch_iir = I915_READ(SDEIIR); 2163 u32 pch_iir = I915_READ(SDEIIR);
2194 2164
2195 if (HAS_PCH_CPT(dev)) 2165 if (HAS_PCH_CPT(dev_priv))
2196 cpt_irq_handler(dev, pch_iir); 2166 cpt_irq_handler(dev_priv, pch_iir);
2197 else 2167 else
2198 ibx_irq_handler(dev, pch_iir); 2168 ibx_irq_handler(dev_priv, pch_iir);
2199 2169
2200		/* should clear the PCH hotplug event before clearing the CPU irq */ 2170		/* should clear the PCH hotplug event before clearing the CPU irq */
2201 I915_WRITE(SDEIIR, pch_iir); 2171 I915_WRITE(SDEIIR, pch_iir);
2202 } 2172 }
2203 2173
2204 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 2174 if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
2205 ironlake_rps_change_irq_handler(dev); 2175 ironlake_rps_change_irq_handler(dev_priv);
2206} 2176}
2207 2177
2208static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) 2178static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
2179 u32 de_iir)
2209{ 2180{
2210 struct drm_i915_private *dev_priv = dev->dev_private;
2211 enum pipe pipe; 2181 enum pipe pipe;
2212 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; 2182 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2213 2183
2214 if (hotplug_trigger) 2184 if (hotplug_trigger)
2215 ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb); 2185 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
2216 2186
2217 if (de_iir & DE_ERR_INT_IVB) 2187 if (de_iir & DE_ERR_INT_IVB)
2218 ivb_err_int_handler(dev); 2188 ivb_err_int_handler(dev_priv);
2219 2189
2220 if (de_iir & DE_AUX_CHANNEL_A_IVB) 2190 if (de_iir & DE_AUX_CHANNEL_A_IVB)
2221 dp_aux_irq_handler(dev); 2191 dp_aux_irq_handler(dev_priv);
2222 2192
2223 if (de_iir & DE_GSE_IVB) 2193 if (de_iir & DE_GSE_IVB)
2224 intel_opregion_asle_intr(dev); 2194 intel_opregion_asle_intr(dev_priv);
2225 2195
2226 for_each_pipe(dev_priv, pipe) { 2196 for_each_pipe(dev_priv, pipe) {
2227 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) && 2197 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2228 intel_pipe_handle_vblank(dev, pipe)) 2198 intel_pipe_handle_vblank(dev_priv, pipe))
2229 intel_check_page_flip(dev, pipe); 2199 intel_check_page_flip(dev_priv, pipe);
2230 2200
2231 /* plane/pipes map 1:1 on ilk+ */ 2201 /* plane/pipes map 1:1 on ilk+ */
2232 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) { 2202 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
2233 intel_prepare_page_flip(dev, pipe); 2203 intel_finish_page_flip_cs(dev_priv, pipe);
2234 intel_finish_page_flip_plane(dev, pipe);
2235 }
2236 } 2204 }
2237 2205
2238 /* check event from PCH */ 2206 /* check event from PCH */
2239 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { 2207 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
2240 u32 pch_iir = I915_READ(SDEIIR); 2208 u32 pch_iir = I915_READ(SDEIIR);
2241 2209
2242 cpt_irq_handler(dev, pch_iir); 2210 cpt_irq_handler(dev_priv, pch_iir);
2243 2211
2244		/* clear the PCH hotplug event before clearing the CPU irq */ 2212		/* clear the PCH hotplug event before clearing the CPU irq */
2245 I915_WRITE(SDEIIR, pch_iir); 2213 I915_WRITE(SDEIIR, pch_iir);
@@ -2277,7 +2245,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2277 * able to process them after we restore SDEIER (as soon as we restore 2245 * able to process them after we restore SDEIER (as soon as we restore
2278 * it, we'll get an interrupt if SDEIIR still has something to process 2246 * it, we'll get an interrupt if SDEIIR still has something to process
2279 * due to its back queue). */ 2247 * due to its back queue). */
2280 if (!HAS_PCH_NOP(dev)) { 2248 if (!HAS_PCH_NOP(dev_priv)) {
2281 sde_ier = I915_READ(SDEIER); 2249 sde_ier = I915_READ(SDEIER);
2282 I915_WRITE(SDEIER, 0); 2250 I915_WRITE(SDEIER, 0);
2283 POSTING_READ(SDEIER); 2251 POSTING_READ(SDEIER);
@@ -2289,7 +2257,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2289 if (gt_iir) { 2257 if (gt_iir) {
2290 I915_WRITE(GTIIR, gt_iir); 2258 I915_WRITE(GTIIR, gt_iir);
2291 ret = IRQ_HANDLED; 2259 ret = IRQ_HANDLED;
2292 if (INTEL_INFO(dev)->gen >= 6) 2260 if (INTEL_GEN(dev_priv) >= 6)
2293 snb_gt_irq_handler(dev_priv, gt_iir); 2261 snb_gt_irq_handler(dev_priv, gt_iir);
2294 else 2262 else
2295 ilk_gt_irq_handler(dev_priv, gt_iir); 2263 ilk_gt_irq_handler(dev_priv, gt_iir);
@@ -2299,13 +2267,13 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2299 if (de_iir) { 2267 if (de_iir) {
2300 I915_WRITE(DEIIR, de_iir); 2268 I915_WRITE(DEIIR, de_iir);
2301 ret = IRQ_HANDLED; 2269 ret = IRQ_HANDLED;
2302 if (INTEL_INFO(dev)->gen >= 7) 2270 if (INTEL_GEN(dev_priv) >= 7)
2303 ivb_display_irq_handler(dev, de_iir); 2271 ivb_display_irq_handler(dev_priv, de_iir);
2304 else 2272 else
2305 ilk_display_irq_handler(dev, de_iir); 2273 ilk_display_irq_handler(dev_priv, de_iir);
2306 } 2274 }
2307 2275
2308 if (INTEL_INFO(dev)->gen >= 6) { 2276 if (INTEL_GEN(dev_priv) >= 6) {
2309 u32 pm_iir = I915_READ(GEN6_PMIIR); 2277 u32 pm_iir = I915_READ(GEN6_PMIIR);
2310 if (pm_iir) { 2278 if (pm_iir) {
2311 I915_WRITE(GEN6_PMIIR, pm_iir); 2279 I915_WRITE(GEN6_PMIIR, pm_iir);
@@ -2316,7 +2284,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2316 2284
2317 I915_WRITE(DEIER, de_ier); 2285 I915_WRITE(DEIER, de_ier);
2318 POSTING_READ(DEIER); 2286 POSTING_READ(DEIER);
2319 if (!HAS_PCH_NOP(dev)) { 2287 if (!HAS_PCH_NOP(dev_priv)) {
2320 I915_WRITE(SDEIER, sde_ier); 2288 I915_WRITE(SDEIER, sde_ier);
2321 POSTING_READ(SDEIER); 2289 POSTING_READ(SDEIER);
2322 } 2290 }
@@ -2327,10 +2295,10 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2327 return ret; 2295 return ret;
2328} 2296}
2329 2297
2330static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, 2298static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2299 u32 hotplug_trigger,
2331 const u32 hpd[HPD_NUM_PINS]) 2300 const u32 hpd[HPD_NUM_PINS])
2332{ 2301{
2333 struct drm_i915_private *dev_priv = to_i915(dev);
2334 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2302 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2335 2303
2336 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2304 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
@@ -2340,13 +2308,12 @@ static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
2340 dig_hotplug_reg, hpd, 2308 dig_hotplug_reg, hpd,
2341 bxt_port_hotplug_long_detect); 2309 bxt_port_hotplug_long_detect);
2342 2310
2343 intel_hpd_irq_handler(dev, pin_mask, long_mask); 2311 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2344} 2312}
2345 2313
2346static irqreturn_t 2314static irqreturn_t
2347gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) 2315gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2348{ 2316{
2349 struct drm_device *dev = dev_priv->dev;
2350 irqreturn_t ret = IRQ_NONE; 2317 irqreturn_t ret = IRQ_NONE;
2351 u32 iir; 2318 u32 iir;
2352 enum pipe pipe; 2319 enum pipe pipe;
@@ -2357,7 +2324,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2357 I915_WRITE(GEN8_DE_MISC_IIR, iir); 2324 I915_WRITE(GEN8_DE_MISC_IIR, iir);
2358 ret = IRQ_HANDLED; 2325 ret = IRQ_HANDLED;
2359 if (iir & GEN8_DE_MISC_GSE) 2326 if (iir & GEN8_DE_MISC_GSE)
2360 intel_opregion_asle_intr(dev); 2327 intel_opregion_asle_intr(dev_priv);
2361 else 2328 else
2362 DRM_ERROR("Unexpected DE Misc interrupt\n"); 2329 DRM_ERROR("Unexpected DE Misc interrupt\n");
2363 } 2330 }
@@ -2381,26 +2348,28 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2381 GEN9_AUX_CHANNEL_D; 2348 GEN9_AUX_CHANNEL_D;
2382 2349
2383 if (iir & tmp_mask) { 2350 if (iir & tmp_mask) {
2384 dp_aux_irq_handler(dev); 2351 dp_aux_irq_handler(dev_priv);
2385 found = true; 2352 found = true;
2386 } 2353 }
2387 2354
2388 if (IS_BROXTON(dev_priv)) { 2355 if (IS_BROXTON(dev_priv)) {
2389 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; 2356 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
2390 if (tmp_mask) { 2357 if (tmp_mask) {
2391 bxt_hpd_irq_handler(dev, tmp_mask, hpd_bxt); 2358 bxt_hpd_irq_handler(dev_priv, tmp_mask,
2359 hpd_bxt);
2392 found = true; 2360 found = true;
2393 } 2361 }
2394 } else if (IS_BROADWELL(dev_priv)) { 2362 } else if (IS_BROADWELL(dev_priv)) {
2395 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG; 2363 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
2396 if (tmp_mask) { 2364 if (tmp_mask) {
2397 ilk_hpd_irq_handler(dev, tmp_mask, hpd_bdw); 2365 ilk_hpd_irq_handler(dev_priv,
2366 tmp_mask, hpd_bdw);
2398 found = true; 2367 found = true;
2399 } 2368 }
2400 } 2369 }
2401 2370
2402 if (IS_BROXTON(dev) && (iir & BXT_DE_PORT_GMBUS)) { 2371 if (IS_BROXTON(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
2403 gmbus_irq_handler(dev); 2372 gmbus_irq_handler(dev_priv);
2404 found = true; 2373 found = true;
2405 } 2374 }
2406 2375
@@ -2427,8 +2396,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2427 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); 2396 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
2428 2397
2429 if (iir & GEN8_PIPE_VBLANK && 2398 if (iir & GEN8_PIPE_VBLANK &&
2430 intel_pipe_handle_vblank(dev, pipe)) 2399 intel_pipe_handle_vblank(dev_priv, pipe))
2431 intel_check_page_flip(dev, pipe); 2400 intel_check_page_flip(dev_priv, pipe);
2432 2401
2433 flip_done = iir; 2402 flip_done = iir;
2434 if (INTEL_INFO(dev_priv)->gen >= 9) 2403 if (INTEL_INFO(dev_priv)->gen >= 9)
@@ -2436,13 +2405,11 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2436 else 2405 else
2437 flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE; 2406 flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;
2438 2407
2439 if (flip_done) { 2408 if (flip_done)
2440 intel_prepare_page_flip(dev, pipe); 2409 intel_finish_page_flip_cs(dev_priv, pipe);
2441 intel_finish_page_flip_plane(dev, pipe);
2442 }
2443 2410
2444 if (iir & GEN8_PIPE_CDCLK_CRC_DONE) 2411 if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2445 hsw_pipe_crc_irq_handler(dev, pipe); 2412 hsw_pipe_crc_irq_handler(dev_priv, pipe);
2446 2413
2447 if (iir & GEN8_PIPE_FIFO_UNDERRUN) 2414 if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2448 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2415 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
@@ -2459,7 +2426,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2459 fault_errors); 2426 fault_errors);
2460 } 2427 }
2461 2428
2462 if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) && 2429 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2463 master_ctl & GEN8_DE_PCH_IRQ) { 2430 master_ctl & GEN8_DE_PCH_IRQ) {
2464 /* 2431 /*
2465 * FIXME(BDW): Assume for now that the new interrupt handling 2432 * FIXME(BDW): Assume for now that the new interrupt handling
@@ -2472,9 +2439,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2472 ret = IRQ_HANDLED; 2439 ret = IRQ_HANDLED;
2473 2440
2474 if (HAS_PCH_SPT(dev_priv)) 2441 if (HAS_PCH_SPT(dev_priv))
2475 spt_irq_handler(dev, iir); 2442 spt_irq_handler(dev_priv, iir);
2476 else 2443 else
2477 cpt_irq_handler(dev, iir); 2444 cpt_irq_handler(dev_priv, iir);
2478 } else { 2445 } else {
2479 /* 2446 /*
2480 * Like on previous PCH there seems to be something 2447 * Like on previous PCH there seems to be something
@@ -2550,20 +2517,20 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2550 2517
2551/** 2518/**
2552 * i915_reset_and_wakeup - do process context error handling work 2519 * i915_reset_and_wakeup - do process context error handling work
2553 * @dev: drm device 2520 * @dev_priv: i915 device private
2554 * 2521 *
2555 * Fire an error uevent so userspace can see that a hang or error 2522 * Fire an error uevent so userspace can see that a hang or error
2556 * was detected. 2523 * was detected.
2557 */ 2524 */
2558static void i915_reset_and_wakeup(struct drm_device *dev) 2525static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
2559{ 2526{
2560 struct drm_i915_private *dev_priv = to_i915(dev); 2527 struct kobject *kobj = &dev_priv->dev->primary->kdev->kobj;
2561 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 2528 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2562 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 2529 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2563 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 2530 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2564 int ret; 2531 int ret;
2565 2532
2566 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event); 2533 kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
2567 2534
2568 /* 2535 /*
2569 * Note that there's only one work item which does gpu resets, so we 2536 * Note that there's only one work item which does gpu resets, so we
@@ -2577,8 +2544,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
2577 */ 2544 */
2578 if (i915_reset_in_progress(&dev_priv->gpu_error)) { 2545 if (i915_reset_in_progress(&dev_priv->gpu_error)) {
2579 DRM_DEBUG_DRIVER("resetting chip\n"); 2546 DRM_DEBUG_DRIVER("resetting chip\n");
2580 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, 2547 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
2581 reset_event);
2582 2548
2583 /* 2549 /*
2584 * In most cases it's guaranteed that we get here with an RPM 2550 * In most cases it's guaranteed that we get here with an RPM
@@ -2589,7 +2555,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
2589 */ 2555 */
2590 intel_runtime_pm_get(dev_priv); 2556 intel_runtime_pm_get(dev_priv);
2591 2557
2592 intel_prepare_reset(dev); 2558 intel_prepare_reset(dev_priv);
2593 2559
2594 /* 2560 /*
2595 * All state reset _must_ be completed before we update the 2561 * All state reset _must_ be completed before we update the
@@ -2597,14 +2563,14 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
2597 * pending state and not properly drop locks, resulting in 2563 * pending state and not properly drop locks, resulting in
2598 * deadlocks with the reset work. 2564 * deadlocks with the reset work.
2599 */ 2565 */
2600 ret = i915_reset(dev); 2566 ret = i915_reset(dev_priv);
2601 2567
2602 intel_finish_reset(dev); 2568 intel_finish_reset(dev_priv);
2603 2569
2604 intel_runtime_pm_put(dev_priv); 2570 intel_runtime_pm_put(dev_priv);
2605 2571
2606 if (ret == 0) 2572 if (ret == 0)
2607 kobject_uevent_env(&dev->primary->kdev->kobj, 2573 kobject_uevent_env(kobj,
2608 KOBJ_CHANGE, reset_done_event); 2574 KOBJ_CHANGE, reset_done_event);
2609 2575
2610 /* 2576 /*
@@ -2615,9 +2581,8 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
2615 } 2581 }
2616} 2582}
2617 2583
2618static void i915_report_and_clear_eir(struct drm_device *dev) 2584static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv)
2619{ 2585{
2620 struct drm_i915_private *dev_priv = dev->dev_private;
2621 uint32_t instdone[I915_NUM_INSTDONE_REG]; 2586 uint32_t instdone[I915_NUM_INSTDONE_REG];
2622 u32 eir = I915_READ(EIR); 2587 u32 eir = I915_READ(EIR);
2623 int pipe, i; 2588 int pipe, i;
@@ -2627,9 +2592,9 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
2627 2592
2628 pr_err("render error detected, EIR: 0x%08x\n", eir); 2593 pr_err("render error detected, EIR: 0x%08x\n", eir);
2629 2594
2630 i915_get_extra_instdone(dev, instdone); 2595 i915_get_extra_instdone(dev_priv, instdone);
2631 2596
2632 if (IS_G4X(dev)) { 2597 if (IS_G4X(dev_priv)) {
2633 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 2598 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2634 u32 ipeir = I915_READ(IPEIR_I965); 2599 u32 ipeir = I915_READ(IPEIR_I965);
2635 2600
@@ -2651,7 +2616,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
2651 } 2616 }
2652 } 2617 }
2653 2618
2654 if (!IS_GEN2(dev)) { 2619 if (!IS_GEN2(dev_priv)) {
2655 if (eir & I915_ERROR_PAGE_TABLE) { 2620 if (eir & I915_ERROR_PAGE_TABLE) {
2656 u32 pgtbl_err = I915_READ(PGTBL_ER); 2621 u32 pgtbl_err = I915_READ(PGTBL_ER);
2657 pr_err("page table error\n"); 2622 pr_err("page table error\n");
@@ -2673,7 +2638,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
2673 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 2638 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
2674 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2639 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2675 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2640 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2676 if (INTEL_INFO(dev)->gen < 4) { 2641 if (INTEL_GEN(dev_priv) < 4) {
2677 u32 ipeir = I915_READ(IPEIR); 2642 u32 ipeir = I915_READ(IPEIR);
2678 2643
2679 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 2644 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
@@ -2709,18 +2674,19 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
2709 2674
2710/** 2675/**
2711 * i915_handle_error - handle a gpu error 2676 * i915_handle_error - handle a gpu error
2712 * @dev: drm device 2677 * @dev_priv: i915 device private
2713 * @engine_mask: mask representing engines that are hung 2678 * @engine_mask: mask representing engines that are hung
2714 * Do some basic checking of register state at error time and 2679 * Do some basic checking of register state at error time and
2715 * dump it to the syslog. Also call i915_capture_error_state() to make 2680 * dump it to the syslog. Also call i915_capture_error_state() to make
2716 * sure we get a record and make it available in debugfs. Fire a uevent 2681 * sure we get a record and make it available in debugfs. Fire a uevent
2717 * so userspace knows something bad happened (should trigger collection 2682 * so userspace knows something bad happened (should trigger collection
2718 * of a ring dump etc.). 2683 * of a ring dump etc.).
2684 * @fmt: Error message format string
2719 */ 2685 */
2720void i915_handle_error(struct drm_device *dev, u32 engine_mask, 2686void i915_handle_error(struct drm_i915_private *dev_priv,
2687 u32 engine_mask,
2721 const char *fmt, ...) 2688 const char *fmt, ...)
2722{ 2689{
2723 struct drm_i915_private *dev_priv = dev->dev_private;
2724 va_list args; 2690 va_list args;
2725 char error_msg[80]; 2691 char error_msg[80];
2726 2692
@@ -2728,8 +2694,8 @@ void i915_handle_error(struct drm_device *dev, u32 engine_mask,
2728 vscnprintf(error_msg, sizeof(error_msg), fmt, args); 2694 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2729 va_end(args); 2695 va_end(args);
2730 2696
2731 i915_capture_error_state(dev, engine_mask, error_msg); 2697 i915_capture_error_state(dev_priv, engine_mask, error_msg);
2732 i915_report_and_clear_eir(dev); 2698 i915_report_and_clear_eir(dev_priv);
2733 2699
2734 if (engine_mask) { 2700 if (engine_mask) {
2735 atomic_or(I915_RESET_IN_PROGRESS_FLAG, 2701 atomic_or(I915_RESET_IN_PROGRESS_FLAG,
@@ -2751,7 +2717,7 @@ void i915_handle_error(struct drm_device *dev, u32 engine_mask,
2751 i915_error_wake_up(dev_priv, false); 2717 i915_error_wake_up(dev_priv, false);
2752 } 2718 }
2753 2719
2754 i915_reset_and_wakeup(dev); 2720 i915_reset_and_wakeup(dev_priv);
2755} 2721}
2756 2722
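The new calling convention in one place: callers now hand in dev_priv plus a printf-style reason that ends up in the captured error state. Both of these calls appear later in this diff, in i915_hangcheck_elapsed() and ring_stuck():

i915_handle_error(dev_priv, rings_hung, "Engine(s) hung");
i915_handle_error(dev_priv, 0, "Kicking stuck wait on %s", engine->name);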
2757/* Called from drm generic code, passed 'crtc' which 2723/* Called from drm generic code, passed 'crtc' which
@@ -2869,9 +2835,9 @@ ring_idle(struct intel_engine_cs *engine, u32 seqno)
2869} 2835}
2870 2836
2871static bool 2837static bool
2872ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr) 2838ipehr_is_semaphore_wait(struct drm_i915_private *dev_priv, u32 ipehr)
2873{ 2839{
2874 if (INTEL_INFO(dev)->gen >= 8) { 2840 if (INTEL_GEN(dev_priv) >= 8) {
2875 return (ipehr >> 23) == 0x1c; 2841 return (ipehr >> 23) == 0x1c;
2876 } else { 2842 } else {
2877 ipehr &= ~MI_SEMAPHORE_SYNC_MASK; 2843 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
@@ -2884,10 +2850,10 @@ static struct intel_engine_cs *
2884semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr, 2850semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
2885 u64 offset) 2851 u64 offset)
2886{ 2852{
2887 struct drm_i915_private *dev_priv = engine->dev->dev_private; 2853 struct drm_i915_private *dev_priv = engine->i915;
2888 struct intel_engine_cs *signaller; 2854 struct intel_engine_cs *signaller;
2889 2855
2890 if (INTEL_INFO(dev_priv)->gen >= 8) { 2856 if (INTEL_GEN(dev_priv) >= 8) {
2891 for_each_engine(signaller, dev_priv) { 2857 for_each_engine(signaller, dev_priv) {
2892 if (engine == signaller) 2858 if (engine == signaller)
2893 continue; 2859 continue;
@@ -2916,7 +2882,7 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
2916static struct intel_engine_cs * 2882static struct intel_engine_cs *
2917semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno) 2883semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
2918{ 2884{
2919 struct drm_i915_private *dev_priv = engine->dev->dev_private; 2885 struct drm_i915_private *dev_priv = engine->i915;
2920 u32 cmd, ipehr, head; 2886 u32 cmd, ipehr, head;
2921 u64 offset = 0; 2887 u64 offset = 0;
2922 int i, backwards; 2888 int i, backwards;
@@ -2942,7 +2908,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
2942 return NULL; 2908 return NULL;
2943 2909
2944 ipehr = I915_READ(RING_IPEHR(engine->mmio_base)); 2910 ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
2945 if (!ipehr_is_semaphore_wait(engine->dev, ipehr)) 2911 if (!ipehr_is_semaphore_wait(engine->i915, ipehr))
2946 return NULL; 2912 return NULL;
2947 2913
2948 /* 2914 /*
@@ -2954,7 +2920,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
2954 * ringbuffer itself. 2920 * ringbuffer itself.
2955 */ 2921 */
2956 head = I915_READ_HEAD(engine) & HEAD_ADDR; 2922 head = I915_READ_HEAD(engine) & HEAD_ADDR;
2957 backwards = (INTEL_INFO(engine->dev)->gen >= 8) ? 5 : 4; 2923 backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
2958 2924
2959 for (i = backwards; i; --i) { 2925 for (i = backwards; i; --i) {
2960 /* 2926 /*
@@ -2976,7 +2942,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
2976 return NULL; 2942 return NULL;
2977 2943
2978 *seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1; 2944 *seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1;
2979 if (INTEL_INFO(engine->dev)->gen >= 8) { 2945 if (INTEL_GEN(dev_priv) >= 8) {
2980 offset = ioread32(engine->buffer->virtual_start + head + 12); 2946 offset = ioread32(engine->buffer->virtual_start + head + 12);
2981 offset <<= 32; 2947 offset <<= 32;
2982 offset = ioread32(engine->buffer->virtual_start + head + 8); 2948 offset = ioread32(engine->buffer->virtual_start + head + 8);
@@ -2986,7 +2952,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
2986 2952
2987static int semaphore_passed(struct intel_engine_cs *engine) 2953static int semaphore_passed(struct intel_engine_cs *engine)
2988{ 2954{
2989 struct drm_i915_private *dev_priv = engine->dev->dev_private; 2955 struct drm_i915_private *dev_priv = engine->i915;
2990 struct intel_engine_cs *signaller; 2956 struct intel_engine_cs *signaller;
2991 u32 seqno; 2957 u32 seqno;
2992 2958
@@ -3028,7 +2994,7 @@ static bool subunits_stuck(struct intel_engine_cs *engine)
3028 if (engine->id != RCS) 2994 if (engine->id != RCS)
3029 return true; 2995 return true;
3030 2996
3031 i915_get_extra_instdone(engine->dev, instdone); 2997 i915_get_extra_instdone(engine->i915, instdone);
3032 2998
3033 /* There might be unstable subunit states even when 2999 /* There might be unstable subunit states even when
3034 * actual head is not moving. Filter out the unstable ones by 3000 * actual head is not moving. Filter out the unstable ones by
@@ -3069,8 +3035,7 @@ head_stuck(struct intel_engine_cs *engine, u64 acthd)
3069static enum intel_ring_hangcheck_action 3035static enum intel_ring_hangcheck_action
3070ring_stuck(struct intel_engine_cs *engine, u64 acthd) 3036ring_stuck(struct intel_engine_cs *engine, u64 acthd)
3071{ 3037{
3072 struct drm_device *dev = engine->dev; 3038 struct drm_i915_private *dev_priv = engine->i915;
3073 struct drm_i915_private *dev_priv = dev->dev_private;
3074 enum intel_ring_hangcheck_action ha; 3039 enum intel_ring_hangcheck_action ha;
3075 u32 tmp; 3040 u32 tmp;
3076 3041
@@ -3078,7 +3043,7 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
3078 if (ha != HANGCHECK_HUNG) 3043 if (ha != HANGCHECK_HUNG)
3079 return ha; 3044 return ha;
3080 3045
3081 if (IS_GEN2(dev)) 3046 if (IS_GEN2(dev_priv))
3082 return HANGCHECK_HUNG; 3047 return HANGCHECK_HUNG;
3083 3048
3084 /* Is the chip hanging on a WAIT_FOR_EVENT? 3049 /* Is the chip hanging on a WAIT_FOR_EVENT?
@@ -3088,19 +3053,19 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
3088 */ 3053 */
3089 tmp = I915_READ_CTL(engine); 3054 tmp = I915_READ_CTL(engine);
3090 if (tmp & RING_WAIT) { 3055 if (tmp & RING_WAIT) {
3091 i915_handle_error(dev, 0, 3056 i915_handle_error(dev_priv, 0,
3092 "Kicking stuck wait on %s", 3057 "Kicking stuck wait on %s",
3093 engine->name); 3058 engine->name);
3094 I915_WRITE_CTL(engine, tmp); 3059 I915_WRITE_CTL(engine, tmp);
3095 return HANGCHECK_KICK; 3060 return HANGCHECK_KICK;
3096 } 3061 }
3097 3062
3098 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { 3063 if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
3099 switch (semaphore_passed(engine)) { 3064 switch (semaphore_passed(engine)) {
3100 default: 3065 default:
3101 return HANGCHECK_HUNG; 3066 return HANGCHECK_HUNG;
3102 case 1: 3067 case 1:
3103 i915_handle_error(dev, 0, 3068 i915_handle_error(dev_priv, 0,
3104 "Kicking stuck semaphore on %s", 3069 "Kicking stuck semaphore on %s",
3105 engine->name); 3070 engine->name);
3106 I915_WRITE_CTL(engine, tmp); 3071 I915_WRITE_CTL(engine, tmp);
@@ -3115,7 +3080,7 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
3115 3080
3116static unsigned kick_waiters(struct intel_engine_cs *engine) 3081static unsigned kick_waiters(struct intel_engine_cs *engine)
3117{ 3082{
3118 struct drm_i915_private *i915 = to_i915(engine->dev); 3083 struct drm_i915_private *i915 = engine->i915;
3119 unsigned user_interrupts = READ_ONCE(engine->user_interrupts); 3084 unsigned user_interrupts = READ_ONCE(engine->user_interrupts);
3120 3085
3121 if (engine->hangcheck.user_interrupts == user_interrupts && 3086 if (engine->hangcheck.user_interrupts == user_interrupts &&
@@ -3144,7 +3109,6 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
3144 struct drm_i915_private *dev_priv = 3109 struct drm_i915_private *dev_priv =
3145 container_of(work, typeof(*dev_priv), 3110 container_of(work, typeof(*dev_priv),
3146 gpu_error.hangcheck_work.work); 3111 gpu_error.hangcheck_work.work);
3147 struct drm_device *dev = dev_priv->dev;
3148 struct intel_engine_cs *engine; 3112 struct intel_engine_cs *engine;
3149 enum intel_engine_id id; 3113 enum intel_engine_id id;
3150 int busy_count = 0, rings_hung = 0; 3114 int busy_count = 0, rings_hung = 0;
@@ -3272,22 +3236,22 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
3272 } 3236 }
3273 3237
3274 if (rings_hung) { 3238 if (rings_hung) {
3275 i915_handle_error(dev, rings_hung, "Engine(s) hung"); 3239 i915_handle_error(dev_priv, rings_hung, "Engine(s) hung");
3276 goto out; 3240 goto out;
3277 } 3241 }
3278 3242
3279 if (busy_count) 3243 if (busy_count)
3280 /* Reset timer in case the chip hangs without another 3244 /* Reset timer in case the chip hangs without another
3281 * request being added */ 3245 * request being added */
3282 i915_queue_hangcheck(dev); 3246 i915_queue_hangcheck(dev_priv);
3283 3247
3284out: 3248out:
3285 ENABLE_RPM_WAKEREF_ASSERTS(dev_priv); 3249 ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
3286} 3250}
3287 3251
3288void i915_queue_hangcheck(struct drm_device *dev) 3252void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
3289{ 3253{
3290 struct i915_gpu_error *e = &to_i915(dev)->gpu_error; 3254 struct i915_gpu_error *e = &dev_priv->gpu_error;
3291 3255
3292 if (!i915.enable_hangcheck) 3256 if (!i915.enable_hangcheck)
3293 return; 3257 return;
@@ -3500,31 +3464,29 @@ static void cherryview_irq_preinstall(struct drm_device *dev)
3500 spin_unlock_irq(&dev_priv->irq_lock); 3464 spin_unlock_irq(&dev_priv->irq_lock);
3501} 3465}
3502 3466
3503static u32 intel_hpd_enabled_irqs(struct drm_device *dev, 3467static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
3504 const u32 hpd[HPD_NUM_PINS]) 3468 const u32 hpd[HPD_NUM_PINS])
3505{ 3469{
3506 struct drm_i915_private *dev_priv = to_i915(dev);
3507 struct intel_encoder *encoder; 3470 struct intel_encoder *encoder;
3508 u32 enabled_irqs = 0; 3471 u32 enabled_irqs = 0;
3509 3472
3510 for_each_intel_encoder(dev, encoder) 3473 for_each_intel_encoder(dev_priv->dev, encoder)
3511 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) 3474 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3512 enabled_irqs |= hpd[encoder->hpd_pin]; 3475 enabled_irqs |= hpd[encoder->hpd_pin];
3513 3476
3514 return enabled_irqs; 3477 return enabled_irqs;
3515} 3478}
3516 3479
3517static void ibx_hpd_irq_setup(struct drm_device *dev) 3480static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
3518{ 3481{
3519 struct drm_i915_private *dev_priv = dev->dev_private;
3520 u32 hotplug_irqs, hotplug, enabled_irqs; 3482 u32 hotplug_irqs, hotplug, enabled_irqs;
3521 3483
3522 if (HAS_PCH_IBX(dev)) { 3484 if (HAS_PCH_IBX(dev_priv)) {
3523 hotplug_irqs = SDE_HOTPLUG_MASK; 3485 hotplug_irqs = SDE_HOTPLUG_MASK;
3524 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx); 3486 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
3525 } else { 3487 } else {
3526 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3488 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3527 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt); 3489 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
3528 } 3490 }
3529 3491
3530 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3492 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3543,18 +3505,17 @@ static void ibx_hpd_irq_setup(struct drm_device *dev)
3543 * When CPU and PCH are on the same package, port A 3505 * When CPU and PCH are on the same package, port A
3544 * HPD must be enabled in both north and south. 3506 * HPD must be enabled in both north and south.
3545 */ 3507 */
3546 if (HAS_PCH_LPT_LP(dev)) 3508 if (HAS_PCH_LPT_LP(dev_priv))
3547 hotplug |= PORTA_HOTPLUG_ENABLE; 3509 hotplug |= PORTA_HOTPLUG_ENABLE;
3548 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3510 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3549} 3511}
3550 3512
3551static void spt_hpd_irq_setup(struct drm_device *dev) 3513static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3552{ 3514{
3553 struct drm_i915_private *dev_priv = dev->dev_private;
3554 u32 hotplug_irqs, hotplug, enabled_irqs; 3515 u32 hotplug_irqs, hotplug, enabled_irqs;
3555 3516
3556 hotplug_irqs = SDE_HOTPLUG_MASK_SPT; 3517 hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3557 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt); 3518 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
3558 3519
3559 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3520 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3560 3521
@@ -3569,24 +3530,23 @@ static void spt_hpd_irq_setup(struct drm_device *dev)
3569 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); 3530 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3570} 3531}
3571 3532
3572static void ilk_hpd_irq_setup(struct drm_device *dev) 3533static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3573{ 3534{
3574 struct drm_i915_private *dev_priv = dev->dev_private;
3575 u32 hotplug_irqs, hotplug, enabled_irqs; 3535 u32 hotplug_irqs, hotplug, enabled_irqs;
3576 3536
3577 if (INTEL_INFO(dev)->gen >= 8) { 3537 if (INTEL_GEN(dev_priv) >= 8) {
3578 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG; 3538 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3579 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw); 3539 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
3580 3540
3581 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3541 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3582 } else if (INTEL_INFO(dev)->gen >= 7) { 3542 } else if (INTEL_GEN(dev_priv) >= 7) {
3583 hotplug_irqs = DE_DP_A_HOTPLUG_IVB; 3543 hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3584 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb); 3544 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
3585 3545
3586 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3546 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3587 } else { 3547 } else {
3588 hotplug_irqs = DE_DP_A_HOTPLUG; 3548 hotplug_irqs = DE_DP_A_HOTPLUG;
3589 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk); 3549 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
3590 3550
3591 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3551 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3592 } 3552 }
@@ -3601,15 +3561,14 @@ static void ilk_hpd_irq_setup(struct drm_device *dev)
3601 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms; 3561 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
3602 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); 3562 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3603 3563
3604 ibx_hpd_irq_setup(dev); 3564 ibx_hpd_irq_setup(dev_priv);
3605} 3565}
3606 3566
3607static void bxt_hpd_irq_setup(struct drm_device *dev) 3567static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3608{ 3568{
3609 struct drm_i915_private *dev_priv = dev->dev_private;
3610 u32 hotplug_irqs, hotplug, enabled_irqs; 3569 u32 hotplug_irqs, hotplug, enabled_irqs;
3611 3570
3612 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt); 3571 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
3613 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; 3572 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3614 3573
3615 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3574 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3827,6 +3786,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3827 uint32_t de_pipe_enables; 3786 uint32_t de_pipe_enables;
3828 u32 de_port_masked = GEN8_AUX_CHANNEL_A; 3787 u32 de_port_masked = GEN8_AUX_CHANNEL_A;
3829 u32 de_port_enables; 3788 u32 de_port_enables;
3789 u32 de_misc_masked = GEN8_DE_MISC_GSE;
3830 enum pipe pipe; 3790 enum pipe pipe;
3831 3791
3832 if (INTEL_INFO(dev_priv)->gen >= 9) { 3792 if (INTEL_INFO(dev_priv)->gen >= 9) {
@@ -3862,6 +3822,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3862 de_pipe_enables); 3822 de_pipe_enables);
3863 3823
3864 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables); 3824 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3825 GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
3865} 3826}
3866 3827
3867static int gen8_irq_postinstall(struct drm_device *dev) 3828static int gen8_irq_postinstall(struct drm_device *dev)
@@ -4006,13 +3967,12 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
4006/* 3967/*
4007 * Returns true when a page flip has completed. 3968 * Returns true when a page flip has completed.
4008 */ 3969 */
4009static bool i8xx_handle_vblank(struct drm_device *dev, 3970static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv,
4010 int plane, int pipe, u32 iir) 3971 int plane, int pipe, u32 iir)
4011{ 3972{
4012 struct drm_i915_private *dev_priv = dev->dev_private;
4013 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3973 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
4014 3974
4015 if (!intel_pipe_handle_vblank(dev, pipe)) 3975 if (!intel_pipe_handle_vblank(dev_priv, pipe))
4016 return false; 3976 return false;
4017 3977
4018 if ((iir & flip_pending) == 0) 3978 if ((iir & flip_pending) == 0)
@@ -4027,12 +3987,11 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
4027 if (I915_READ16(ISR) & flip_pending) 3987 if (I915_READ16(ISR) & flip_pending)
4028 goto check_page_flip; 3988 goto check_page_flip;
4029 3989
4030 intel_prepare_page_flip(dev, plane); 3990 intel_finish_page_flip_cs(dev_priv, pipe);
4031 intel_finish_page_flip(dev, pipe);
4032 return true; 3991 return true;
4033 3992
4034check_page_flip: 3993check_page_flip:
4035 intel_check_page_flip(dev, pipe); 3994 intel_check_page_flip(dev_priv, pipe);
4036 return false; 3995 return false;
4037} 3996}
4038 3997
@@ -4089,15 +4048,15 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4089 4048
4090 for_each_pipe(dev_priv, pipe) { 4049 for_each_pipe(dev_priv, pipe) {
4091 int plane = pipe; 4050 int plane = pipe;
4092 if (HAS_FBC(dev)) 4051 if (HAS_FBC(dev_priv))
4093 plane = !plane; 4052 plane = !plane;
4094 4053
4095 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 4054 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4096 i8xx_handle_vblank(dev, plane, pipe, iir)) 4055 i8xx_handle_vblank(dev_priv, plane, pipe, iir))
4097 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 4056 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4098 4057
4099 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4058 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4100 i9xx_pipe_crc_irq_handler(dev, pipe); 4059 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
4101 4060
4102 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4061 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4103 intel_cpu_fifo_underrun_irq_handler(dev_priv, 4062 intel_cpu_fifo_underrun_irq_handler(dev_priv,
@@ -4182,7 +4141,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
4182 I915_WRITE(IER, enable_mask); 4141 I915_WRITE(IER, enable_mask);
4183 POSTING_READ(IER); 4142 POSTING_READ(IER);
4184 4143
4185 i915_enable_asle_pipestat(dev); 4144 i915_enable_asle_pipestat(dev_priv);
4186 4145
4187 /* Interrupt setup is already guaranteed to be single-threaded, this is 4146 /* Interrupt setup is already guaranteed to be single-threaded, this is
4188 * just to make the assert_spin_locked check happy. */ 4147 * just to make the assert_spin_locked check happy. */
@@ -4197,13 +4156,12 @@ static int i915_irq_postinstall(struct drm_device *dev)
4197/* 4156/*
4198 * Returns true when a page flip has completed. 4157 * Returns true when a page flip has completed.
4199 */ 4158 */
4200static bool i915_handle_vblank(struct drm_device *dev, 4159static bool i915_handle_vblank(struct drm_i915_private *dev_priv,
4201 int plane, int pipe, u32 iir) 4160 int plane, int pipe, u32 iir)
4202{ 4161{
4203 struct drm_i915_private *dev_priv = dev->dev_private;
4204 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 4162 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
4205 4163
4206 if (!intel_pipe_handle_vblank(dev, pipe)) 4164 if (!intel_pipe_handle_vblank(dev_priv, pipe))
4207 return false; 4165 return false;
4208 4166
4209 if ((iir & flip_pending) == 0) 4167 if ((iir & flip_pending) == 0)
@@ -4218,12 +4176,11 @@ static bool i915_handle_vblank(struct drm_device *dev,
4218 if (I915_READ(ISR) & flip_pending) 4176 if (I915_READ(ISR) & flip_pending)
4219 goto check_page_flip; 4177 goto check_page_flip;
4220 4178
4221 intel_prepare_page_flip(dev, plane); 4179 intel_finish_page_flip_cs(dev_priv, pipe);
4222 intel_finish_page_flip(dev, pipe);
4223 return true; 4180 return true;
4224 4181
4225check_page_flip: 4182check_page_flip:
4226 intel_check_page_flip(dev, pipe); 4183 intel_check_page_flip(dev_priv, pipe);
4227 return false; 4184 return false;
4228} 4185}
4229 4186
@@ -4273,11 +4230,11 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
4273 break; 4230 break;
4274 4231
4275 /* Consume port. Then clear IIR or we'll miss events */ 4232 /* Consume port. Then clear IIR or we'll miss events */
4276 if (I915_HAS_HOTPLUG(dev) && 4233 if (I915_HAS_HOTPLUG(dev_priv) &&
4277 iir & I915_DISPLAY_PORT_INTERRUPT) { 4234 iir & I915_DISPLAY_PORT_INTERRUPT) {
4278 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 4235 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4279 if (hotplug_status) 4236 if (hotplug_status)
4280 i9xx_hpd_irq_handler(dev, hotplug_status); 4237 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4281 } 4238 }
4282 4239
4283 I915_WRITE(IIR, iir & ~flip_mask); 4240 I915_WRITE(IIR, iir & ~flip_mask);
@@ -4288,18 +4245,18 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
4288 4245
4289 for_each_pipe(dev_priv, pipe) { 4246 for_each_pipe(dev_priv, pipe) {
4290 int plane = pipe; 4247 int plane = pipe;
4291 if (HAS_FBC(dev)) 4248 if (HAS_FBC(dev_priv))
4292 plane = !plane; 4249 plane = !plane;
4293 4250
4294 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 4251 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4295 i915_handle_vblank(dev, plane, pipe, iir)) 4252 i915_handle_vblank(dev_priv, plane, pipe, iir))
4296 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 4253 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4297 4254
4298 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 4255 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4299 blc_event = true; 4256 blc_event = true;
4300 4257
4301 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4258 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4302 i9xx_pipe_crc_irq_handler(dev, pipe); 4259 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
4303 4260
4304 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4261 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4305 intel_cpu_fifo_underrun_irq_handler(dev_priv, 4262 intel_cpu_fifo_underrun_irq_handler(dev_priv,
@@ -4307,7 +4264,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
4307 } 4264 }
4308 4265
4309 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 4266 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4310 intel_opregion_asle_intr(dev); 4267 intel_opregion_asle_intr(dev_priv);
4311 4268
4312 /* With MSI, interrupts are only generated when iir 4269 /* With MSI, interrupts are only generated when iir
4313 * transitions from zero to nonzero. If another bit got 4270 * transitions from zero to nonzero. If another bit got
@@ -4391,7 +4348,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
4391 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 4348 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4392 enable_mask |= I915_USER_INTERRUPT; 4349 enable_mask |= I915_USER_INTERRUPT;
4393 4350
4394 if (IS_G4X(dev)) 4351 if (IS_G4X(dev_priv))
4395 enable_mask |= I915_BSD_USER_INTERRUPT; 4352 enable_mask |= I915_BSD_USER_INTERRUPT;
4396 4353
4397 /* Interrupt setup is already guaranteed to be single-threaded, this is 4354 /* Interrupt setup is already guaranteed to be single-threaded, this is
@@ -4406,7 +4363,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
4406 * Enable some error detection, note the instruction error mask 4363 * Enable some error detection, note the instruction error mask
4407 * bit is reserved, so we leave it masked. 4364 * bit is reserved, so we leave it masked.
4408 */ 4365 */
4409 if (IS_G4X(dev)) { 4366 if (IS_G4X(dev_priv)) {
4410 error_mask = ~(GM45_ERROR_PAGE_TABLE | 4367 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4411 GM45_ERROR_MEM_PRIV | 4368 GM45_ERROR_MEM_PRIV |
4412 GM45_ERROR_CP_PRIV | 4369 GM45_ERROR_CP_PRIV |
@@ -4424,26 +4381,25 @@ static int i965_irq_postinstall(struct drm_device *dev)
4424 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4381 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4425 POSTING_READ(PORT_HOTPLUG_EN); 4382 POSTING_READ(PORT_HOTPLUG_EN);
4426 4383
4427 i915_enable_asle_pipestat(dev); 4384 i915_enable_asle_pipestat(dev_priv);
4428 4385
4429 return 0; 4386 return 0;
4430} 4387}
4431 4388
4432static void i915_hpd_irq_setup(struct drm_device *dev) 4389static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
4433{ 4390{
4434 struct drm_i915_private *dev_priv = dev->dev_private;
4435 u32 hotplug_en; 4391 u32 hotplug_en;
4436 4392
4437 assert_spin_locked(&dev_priv->irq_lock); 4393 assert_spin_locked(&dev_priv->irq_lock);
4438 4394
4439 /* Note HDMI and DP share hotplug bits */ 4395 /* Note HDMI and DP share hotplug bits */
4440 /* enable bits are the same for all generations */ 4396 /* enable bits are the same for all generations */
4441 hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915); 4397 hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
4442 /* Programming the CRT detection parameters tends 4398 /* Programming the CRT detection parameters tends
4443 to generate a spurious hotplug event about three 4399 to generate a spurious hotplug event about three
4444 seconds later. So just do it once. 4400 seconds later. So just do it once.
4445 */ 4401 */
4446 if (IS_G4X(dev)) 4402 if (IS_G4X(dev_priv))
4447 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 4403 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4448 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 4404 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4449 4405
@@ -4510,7 +4466,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
4510 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 4466 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
4511 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 4467 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4512 if (hotplug_status) 4468 if (hotplug_status)
4513 i9xx_hpd_irq_handler(dev, hotplug_status); 4469 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4514 } 4470 }
4515 4471
4516 I915_WRITE(IIR, iir & ~flip_mask); 4472 I915_WRITE(IIR, iir & ~flip_mask);
@@ -4523,24 +4479,24 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
4523 4479
4524 for_each_pipe(dev_priv, pipe) { 4480 for_each_pipe(dev_priv, pipe) {
4525 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 4481 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4526 i915_handle_vblank(dev, pipe, pipe, iir)) 4482 i915_handle_vblank(dev_priv, pipe, pipe, iir))
4527 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); 4483 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4528 4484
4529 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 4485 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4530 blc_event = true; 4486 blc_event = true;
4531 4487
4532 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4488 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4533 i9xx_pipe_crc_irq_handler(dev, pipe); 4489 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
4534 4490
4535 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4491 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4536 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 4492 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4537 } 4493 }
4538 4494
4539 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 4495 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4540 intel_opregion_asle_intr(dev); 4496 intel_opregion_asle_intr(dev_priv);
4541 4497
4542 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 4498 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4543 gmbus_irq_handler(dev); 4499 gmbus_irq_handler(dev_priv);
4544 4500
4545 /* With MSI, interrupts are only generated when iir 4501 /* With MSI, interrupts are only generated when iir
4546 * transitions from zero to nonzero. If another bit got 4502 * transitions from zero to nonzero. If another bit got
@@ -4611,6 +4567,20 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
4611 else 4567 else
4612 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; 4568 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4613 4569
4570 dev_priv->rps.pm_intr_keep = 0;
4571
4572 /*
 4573 * SNB,IVB can, while VLV,CHV may, hard hang on a looping batchbuffer
4574 * if GEN6_PM_UP_EI_EXPIRED is masked.
4575 *
4576 * TODO: verify if this can be reproduced on VLV,CHV.
4577 */
4578 if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
4579 dev_priv->rps.pm_intr_keep |= GEN6_PM_RP_UP_EI_EXPIRED;
4580
4581 if (INTEL_INFO(dev_priv)->gen >= 8)
4582 dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
4583
4614 INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work, 4584 INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
4615 i915_hangcheck_elapsed); 4585 i915_hangcheck_elapsed);
4616 4586
@@ -4674,12 +4644,12 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
4674 dev->driver->disable_vblank = ironlake_disable_vblank; 4644 dev->driver->disable_vblank = ironlake_disable_vblank;
4675 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4645 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4676 } else { 4646 } else {
4677 if (INTEL_INFO(dev_priv)->gen == 2) { 4647 if (IS_GEN2(dev_priv)) {
4678 dev->driver->irq_preinstall = i8xx_irq_preinstall; 4648 dev->driver->irq_preinstall = i8xx_irq_preinstall;
4679 dev->driver->irq_postinstall = i8xx_irq_postinstall; 4649 dev->driver->irq_postinstall = i8xx_irq_postinstall;
4680 dev->driver->irq_handler = i8xx_irq_handler; 4650 dev->driver->irq_handler = i8xx_irq_handler;
4681 dev->driver->irq_uninstall = i8xx_irq_uninstall; 4651 dev->driver->irq_uninstall = i8xx_irq_uninstall;
4682 } else if (INTEL_INFO(dev_priv)->gen == 3) { 4652 } else if (IS_GEN3(dev_priv)) {
4683 dev->driver->irq_preinstall = i915_irq_preinstall; 4653 dev->driver->irq_preinstall = i915_irq_preinstall;
4684 dev->driver->irq_postinstall = i915_irq_postinstall; 4654 dev->driver->irq_postinstall = i915_irq_postinstall;
4685 dev->driver->irq_uninstall = i915_irq_uninstall; 4655 dev->driver->irq_uninstall = i915_irq_uninstall;
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 1779f02e6df8..7effe68d552c 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -54,10 +54,13 @@ struct i915_params i915 __read_mostly = {
54 .verbose_state_checks = 1, 54 .verbose_state_checks = 1,
55 .nuclear_pageflip = 0, 55 .nuclear_pageflip = 0,
56 .edp_vswing = 0, 56 .edp_vswing = 0,
57 .enable_guc_submission = false, 57 .enable_guc_loading = -1,
58 .enable_guc_submission = -1,
58 .guc_log_level = -1, 59 .guc_log_level = -1,
59 .enable_dp_mst = true, 60 .enable_dp_mst = true,
60 .inject_load_failure = 0, 61 .inject_load_failure = 0,
62 .enable_dpcd_backlight = false,
63 .enable_gvt = false,
61}; 64};
62 65
63module_param_named(modeset, i915.modeset, int, 0400); 66module_param_named(modeset, i915.modeset, int, 0400);
@@ -197,8 +200,15 @@ MODULE_PARM_DESC(edp_vswing,
197 "(0=use value from vbt [default], 1=low power swing(200mV)," 200 "(0=use value from vbt [default], 1=low power swing(200mV),"
198 "2=default swing(400mV))"); 201 "2=default swing(400mV))");
199 202
200module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, bool, 0400); 203module_param_named_unsafe(enable_guc_loading, i915.enable_guc_loading, int, 0400);
201MODULE_PARM_DESC(enable_guc_submission, "Enable GuC submission (default:false)"); 204MODULE_PARM_DESC(enable_guc_loading,
205 "Enable GuC firmware loading "
206 "(-1=auto [default], 0=never, 1=if available, 2=required)");
207
208module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, int, 0400);
209MODULE_PARM_DESC(enable_guc_submission,
210 "Enable GuC submission "
211 "(-1=auto [default], 0=never, 1=if available, 2=required)");
202 212
203module_param_named(guc_log_level, i915.guc_log_level, int, 0400); 213module_param_named(guc_log_level, i915.guc_log_level, int, 0400);
204MODULE_PARM_DESC(guc_log_level, 214MODULE_PARM_DESC(guc_log_level,
@@ -210,3 +220,10 @@ MODULE_PARM_DESC(enable_dp_mst,
210module_param_named_unsafe(inject_load_failure, i915.inject_load_failure, uint, 0400); 220module_param_named_unsafe(inject_load_failure, i915.inject_load_failure, uint, 0400);
211MODULE_PARM_DESC(inject_load_failure, 221MODULE_PARM_DESC(inject_load_failure,
212 "Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)"); 222 "Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)");
223module_param_named(enable_dpcd_backlight, i915.enable_dpcd_backlight, bool, 0600);
224MODULE_PARM_DESC(enable_dpcd_backlight,
225 "Enable support for DPCD backlight control (default:false)");
226
227module_param_named(enable_gvt, i915.enable_gvt, bool, 0600);
228MODULE_PARM_DESC(enable_gvt,
229 "Enable Intel GVT-g graphics virtualization host support (default:false)");
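Both GuC options above change from bool to int so that -1 can mean "auto-detect". A sketch of how a loader might collapse the tri-state at init time; sanitize_guc_options() and has_guc_ucode() are illustrative names, not this patch's code:

/* Hypothetical sketch only: resolve -1 ("auto") to a concrete choice. */
static void sanitize_guc_options(void)
{
	/* auto: load the firmware only where it exists */
	if (i915.enable_guc_loading < 0)
		i915.enable_guc_loading = has_guc_ucode() ? 1 : 0;

	/* submission requires the firmware to have been loaded */
	if (i915.enable_guc_submission < 0)
		i915.enable_guc_submission = i915.enable_guc_loading ? 1 : 0;
	if (!i915.enable_guc_loading)
		i915.enable_guc_submission = 0;
}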
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 02bc27804291..0ad020b4a925 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -45,6 +45,8 @@ struct i915_params {
45 int enable_ips; 45 int enable_ips;
46 int invert_brightness; 46 int invert_brightness;
47 int enable_cmd_parser; 47 int enable_cmd_parser;
48 int enable_guc_loading;
49 int enable_guc_submission;
48 int guc_log_level; 50 int guc_log_level;
49 int use_mmio_flip; 51 int use_mmio_flip;
50 int mmio_debug; 52 int mmio_debug;
@@ -57,10 +59,11 @@ struct i915_params {
57 bool load_detect_test; 59 bool load_detect_test;
58 bool reset; 60 bool reset;
59 bool disable_display; 61 bool disable_display;
60 bool enable_guc_submission;
61 bool verbose_state_checks; 62 bool verbose_state_checks;
62 bool nuclear_pageflip; 63 bool nuclear_pageflip;
63 bool enable_dp_mst; 64 bool enable_dp_mst;
65 bool enable_dpcd_backlight;
66 bool enable_gvt;
64}; 67};
65 68
66extern struct i915_params i915 __read_mostly; 69extern struct i915_params i915 __read_mostly;
diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h
new file mode 100644
index 000000000000..c0cb2974caac
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_pvinfo.h
@@ -0,0 +1,113 @@
1/*
2 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24#ifndef _I915_PVINFO_H_
25#define _I915_PVINFO_H_
26
27/* The MMIO offset of the shared info between guest and host emulator */
28#define VGT_PVINFO_PAGE 0x78000
29#define VGT_PVINFO_SIZE 0x1000
30
31/*
32 * The following structure pages are defined in GEN MMIO space
33 * for virtualization. (One page for now)
34 */
35#define VGT_MAGIC 0x4776544776544776ULL /* 'vGTvGTvG' */
36#define VGT_VERSION_MAJOR 1
37#define VGT_VERSION_MINOR 0
38
39#define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor))
40#define INTEL_VGT_IF_VERSION \
41 INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR)
42
43/*
44 * notifications from guest to vgpu device model
45 */
46enum vgt_g2v_type {
47 VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE = 2,
48 VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY,
49 VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE,
50 VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY,
51 VGT_G2V_EXECLIST_CONTEXT_CREATE,
52 VGT_G2V_EXECLIST_CONTEXT_DESTROY,
53 VGT_G2V_MAX,
54};
55
56struct vgt_if {
57 u64 magic; /* VGT_MAGIC */
58 uint16_t version_major;
59 uint16_t version_minor;
60 u32 vgt_id; /* ID of vGT instance */
61 u32 rsv1[12]; /* pad to offset 0x40 */
62 /*
 63 * Data structure to describe the ballooning info of resources.
 64 * Each VM can only have one contiguous area for now.
65 * (May support scattered resource in future)
66 * (starting from offset 0x40)
67 */
68 struct {
 69 /* Aperture register ballooning */
70 struct {
71 u32 base;
72 u32 size;
73 } mappable_gmadr; /* aperture */
 74 /* GMADR register ballooning */
75 struct {
76 u32 base;
77 u32 size;
78 } nonmappable_gmadr; /* non aperture */
79 /* allowed fence registers */
80 u32 fence_num;
81 u32 rsv2[3];
82 } avail_rs; /* available/assigned resource */
83 u32 rsv3[0x200 - 24]; /* pad to half page */
84 /*
 85 * The bottom half page is for responses from the Gfx driver to the hypervisor.
86 */
87 u32 rsv4;
88 u32 display_ready; /* ready for display owner switch */
89
90 u32 rsv5[4];
91
92 u32 g2v_notify;
93 u32 rsv6[7];
94
95 struct {
96 u32 lo;
97 u32 hi;
98 } pdp[4];
99
100 u32 execlist_context_descriptor_lo;
101 u32 execlist_context_descriptor_hi;
102
103 u32 rsv7[0x200 - 24]; /* pad to one page */
104} __packed;
105
106#define vgtif_reg(x) \
107 _MMIO((VGT_PVINFO_PAGE + offsetof(struct vgt_if, x)))
108
109/* vGPU display status to be used by the host side */
110#define VGT_DRV_DISPLAY_NOT_READY 0
111#define VGT_DRV_DISPLAY_READY 1 /* ready for display switch */
112
113#endif /* _I915_PVINFO_H_ */
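This header is the guest/host handshake page for GVT-g. A guest probes for it by checking the magic and interface version through the vgtif_reg() accessor defined above; the fragment below is condensed from the i915 guest-side detection, with the __raw_i915_read64/__raw_i915_read16 helpers assumed from the driver's uncore layer:

/* Sketch: detect a vGPU-aware hypervisor via the PV info page. */
u64 magic = __raw_i915_read64(dev_priv, vgtif_reg(magic));
u32 version;

if (magic != VGT_MAGIC)
	return;		/* no PV info page: bare metal */

version = INTEL_VGT_IF_VERSION_ENCODE(
		__raw_i915_read16(dev_priv, vgtif_reg(version_major)),
		__raw_i915_read16(dev_priv, vgtif_reg(version_minor)));
if (version != INTEL_VGT_IF_VERSION)
	return;		/* incompatible hypervisor interface */

dev_priv->vgpu.active = true;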
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b407411e31ba..c6bfbf8d7cca 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -220,6 +220,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
220#define ECOCHK_PPGTT_WT_HSW (0x2<<3) 220#define ECOCHK_PPGTT_WT_HSW (0x2<<3)
221#define ECOCHK_PPGTT_WB_HSW (0x3<<3) 221#define ECOCHK_PPGTT_WB_HSW (0x3<<3)
222 222
223#define GEN8_CONFIG0 _MMIO(0xD00)
224#define GEN9_DEFAULT_FIXES (1 << 3 | 1 << 2 | 1 << 1)
225
223#define GAC_ECO_BITS _MMIO(0x14090) 226#define GAC_ECO_BITS _MMIO(0x14090)
224#define ECOBITS_SNB_BIT (1<<13) 227#define ECOBITS_SNB_BIT (1<<13)
225#define ECOBITS_PPGTT_CACHE64B (3<<8) 228#define ECOBITS_PPGTT_CACHE64B (3<<8)
@@ -442,6 +445,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
442 */ 445 */
443#define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags)) 446#define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags))
444 447
448#define GEN9_MEDIA_POOL_STATE ((0x3 << 29) | (0x2 << 27) | (0x5 << 16) | 4)
449#define GEN9_MEDIA_POOL_ENABLE (1 << 31)
445#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24)) 450#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24))
446#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19)) 451#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
447#define SC_UPDATE_SCISSOR (0x1<<1) 452#define SC_UPDATE_SCISSOR (0x1<<1)
@@ -713,6 +718,9 @@ enum skl_disp_power_wells {
713 /* Not actual bit groups. Used as IDs for lookup_power_well() */ 718 /* Not actual bit groups. Used as IDs for lookup_power_well() */
714 SKL_DISP_PW_ALWAYS_ON, 719 SKL_DISP_PW_ALWAYS_ON,
715 SKL_DISP_PW_DC_OFF, 720 SKL_DISP_PW_DC_OFF,
721
722 BXT_DPIO_CMN_A,
723 BXT_DPIO_CMN_BC,
716}; 724};
717 725
718#define SKL_POWER_WELL_STATE(pw) (1 << ((pw) * 2)) 726#define SKL_POWER_WELL_STATE(pw) (1 << ((pw) * 2))
@@ -886,7 +894,7 @@ enum skl_disp_power_wells {
886 * PLLs can be routed to any transcoder A/B/C. 894 * PLLs can be routed to any transcoder A/B/C.
887 * 895 *
888 * Note: DDI0 is digital port B, DDI1 is digital port C, and DDI2 is 896 * Note: DDI0 is digital port B, DDI1 is digital port C, and DDI2 is
889 * digital port D (CHV) or port A (BXT). 897 * digital port D (CHV) or port A (BXT). ::
890 * 898 *
891 * 899 *
892 * Dual channel PHY (VLV/CHV/BXT) 900 * Dual channel PHY (VLV/CHV/BXT)
@@ -1273,6 +1281,15 @@ enum skl_disp_power_wells {
1273#define BXT_P_CR_GT_DISP_PWRON _MMIO(0x138090) 1281#define BXT_P_CR_GT_DISP_PWRON _MMIO(0x138090)
1274#define GT_DISPLAY_POWER_ON(phy) (1 << (phy)) 1282#define GT_DISPLAY_POWER_ON(phy) (1 << (phy))
1275 1283
1284#define _BXT_PHY_CTL_DDI_A 0x64C00
1285#define _BXT_PHY_CTL_DDI_B 0x64C10
1286#define _BXT_PHY_CTL_DDI_C 0x64C20
1287#define BXT_PHY_CMNLANE_POWERDOWN_ACK (1 << 10)
1288#define BXT_PHY_LANE_POWERDOWN_ACK (1 << 9)
1289#define BXT_PHY_LANE_ENABLED (1 << 8)
1290#define BXT_PHY_CTL(port) _MMIO_PORT(port, _BXT_PHY_CTL_DDI_A, \
1291 _BXT_PHY_CTL_DDI_B)
1292
1276#define _PHY_CTL_FAMILY_EDP 0x64C80 1293#define _PHY_CTL_FAMILY_EDP 0x64C80
1277#define _PHY_CTL_FAMILY_DDI 0x64C90 1294#define _PHY_CTL_FAMILY_DDI 0x64C90
1278#define COMMON_RESET_DIS (1 << 31) 1295#define COMMON_RESET_DIS (1 << 31)
@@ -1669,6 +1686,9 @@ enum skl_disp_power_wells {
1669 1686
1670#define GEN7_TLB_RD_ADDR _MMIO(0x4700) 1687#define GEN7_TLB_RD_ADDR _MMIO(0x4700)
1671 1688
1689#define GAMT_CHKN_BIT_REG _MMIO(0x4ab8)
1690#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1<<28)
1691
1672#if 0 1692#if 0
1673#define PRB0_TAIL _MMIO(0x2030) 1693#define PRB0_TAIL _MMIO(0x2030)
1674#define PRB0_HEAD _MMIO(0x2034) 1694#define PRB0_HEAD _MMIO(0x2034)
@@ -1804,6 +1824,10 @@ enum skl_disp_power_wells {
1804#define GEN9_IZ_HASHING_MASK(slice) (0x3 << ((slice) * 2)) 1824#define GEN9_IZ_HASHING_MASK(slice) (0x3 << ((slice) * 2))
1805#define GEN9_IZ_HASHING(slice, val) ((val) << ((slice) * 2)) 1825#define GEN9_IZ_HASHING(slice, val) ((val) << ((slice) * 2))
1806 1826
1827/* chicken reg for WaConextSwitchWithConcurrentTLBInvalidate */
1828#define GEN9_CSFE_CHICKEN1_RCS _MMIO(0x20D4)
1829#define GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE (1 << 2)
1830
1807/* WaClearTdlStateAckDirtyBits */ 1831/* WaClearTdlStateAckDirtyBits */
1808#define GEN8_STATE_ACK _MMIO(0x20F0) 1832#define GEN8_STATE_ACK _MMIO(0x20F0)
1809#define GEN9_STATE_ACK_SLICE1 _MMIO(0x20F8) 1833#define GEN9_STATE_ACK_SLICE1 _MMIO(0x20F8)
@@ -2161,6 +2185,9 @@ enum skl_disp_power_wells {
2161 2185
2162#define FBC_LL_SIZE (1536) 2186#define FBC_LL_SIZE (1536)
2163 2187
2188#define FBC_LLC_READ_CTRL _MMIO(0x9044)
2189#define FBC_LLC_FULLY_OPEN (1<<30)
2190
2164/* Framebuffer compression for GM45+ */ 2191/* Framebuffer compression for GM45+ */
2165#define DPFC_CB_BASE _MMIO(0x3200) 2192#define DPFC_CB_BASE _MMIO(0x3200)
2166#define DPFC_CONTROL _MMIO(0x3208) 2193#define DPFC_CONTROL _MMIO(0x3208)
@@ -2200,6 +2227,8 @@ enum skl_disp_power_wells {
2200#define ILK_DPFC_STATUS _MMIO(0x43210) 2227#define ILK_DPFC_STATUS _MMIO(0x43210)
2201#define ILK_DPFC_FENCE_YOFF _MMIO(0x43218) 2228#define ILK_DPFC_FENCE_YOFF _MMIO(0x43218)
2202#define ILK_DPFC_CHICKEN _MMIO(0x43224) 2229#define ILK_DPFC_CHICKEN _MMIO(0x43224)
2230#define ILK_DPFC_DISABLE_DUMMY0 (1<<8)
2231#define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1<<23)
2203#define ILK_FBC_RT_BASE _MMIO(0x2128) 2232#define ILK_FBC_RT_BASE _MMIO(0x2128)
2204#define ILK_FBC_RT_VALID (1<<0) 2233#define ILK_FBC_RT_VALID (1<<0)
2205#define SNB_FBC_FRONT_BUFFER (1<<1) 2234#define SNB_FBC_FRONT_BUFFER (1<<1)
@@ -2449,6 +2478,8 @@ enum skl_disp_power_wells {
2449#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f 2478#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
2450#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 2479#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
2451 2480
2481#define RAWCLK_FREQ_VLV _MMIO(VLV_DISPLAY_BASE + 0x6024)
2482
2452#define _FPA0 0x6040 2483#define _FPA0 0x6040
2453#define _FPA1 0x6044 2484#define _FPA1 0x6044
2454#define _FPB0 0x6048 2485#define _FPB0 0x6048
@@ -3020,6 +3051,18 @@ enum skl_disp_power_wells {
3020/* Same as Haswell, but 72064 bytes now. */ 3051/* Same as Haswell, but 72064 bytes now. */
3021#define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE) 3052#define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE)
3022 3053
3054enum {
3055 INTEL_ADVANCED_CONTEXT = 0,
3056 INTEL_LEGACY_32B_CONTEXT,
3057 INTEL_ADVANCED_AD_CONTEXT,
3058 INTEL_LEGACY_64B_CONTEXT
3059};
3060
3061#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
3062#define GEN8_CTX_ADDRESSING_MODE(dev_priv) (USES_FULL_48BIT_PPGTT(dev_priv) ?\
3063 INTEL_LEGACY_64B_CONTEXT : \
3064 INTEL_LEGACY_32B_CONTEXT)
3065
3023#define CHV_CLK_CTL1 _MMIO(0x101100) 3066#define CHV_CLK_CTL1 _MMIO(0x101100)
3024#define VLV_CLK_CTL2 _MMIO(0x101104) 3067#define VLV_CLK_CTL2 _MMIO(0x101104)
3025#define CLK_CTL2_CZCOUNT_30NS_SHIFT 28 3068#define CLK_CTL2_CZCOUNT_30NS_SHIFT 28
@@ -6031,6 +6074,10 @@ enum skl_disp_power_wells {
6031#define CHICKEN_PAR1_1 _MMIO(0x42080) 6074#define CHICKEN_PAR1_1 _MMIO(0x42080)
6032#define DPA_MASK_VBLANK_SRD (1 << 15) 6075#define DPA_MASK_VBLANK_SRD (1 << 15)
6033#define FORCE_ARB_IDLE_PLANES (1 << 14) 6076#define FORCE_ARB_IDLE_PLANES (1 << 14)
6077#define SKL_EDP_PSR_FIX_RDWRAP (1 << 3)
6078
6079#define CHICKEN_PAR2_1 _MMIO(0x42090)
6080#define KVM_CONFIG_CHANGE_NOTIFICATION_SELECT (1 << 14)
6034 6081
6035#define _CHICKEN_PIPESL_1_A 0x420b0 6082#define _CHICKEN_PIPESL_1_A 0x420b0
6036#define _CHICKEN_PIPESL_1_B 0x420b4 6083#define _CHICKEN_PIPESL_1_B 0x420b4
@@ -6039,6 +6086,7 @@ enum skl_disp_power_wells {
6039#define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B) 6086#define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
6040 6087
6041#define DISP_ARB_CTL _MMIO(0x45000) 6088#define DISP_ARB_CTL _MMIO(0x45000)
6089#define DISP_FBC_MEMORY_WAKE (1<<31)
6042#define DISP_TILE_SURFACE_SWIZZLING (1<<13) 6090#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
6043#define DISP_FBC_WM_DIS (1<<15) 6091#define DISP_FBC_WM_DIS (1<<15)
6044#define DISP_ARB_CTL2 _MMIO(0x45004) 6092#define DISP_ARB_CTL2 _MMIO(0x45004)
@@ -6052,6 +6100,9 @@ enum skl_disp_power_wells {
6052#define HSW_NDE_RSTWRN_OPT _MMIO(0x46408) 6100#define HSW_NDE_RSTWRN_OPT _MMIO(0x46408)
6053#define RESET_PCH_HANDSHAKE_ENABLE (1<<4) 6101#define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
6054 6102
6103#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
6104#define MASK_WAKEMEM (1<<13)
6105
6055#define SKL_DFSM _MMIO(0x51000) 6106#define SKL_DFSM _MMIO(0x51000)
6056#define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23) 6107#define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23)
6057#define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23) 6108#define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23)
@@ -6067,8 +6118,10 @@ enum skl_disp_power_wells {
6067 6118
6068#define FF_SLICE_CS_CHICKEN2 _MMIO(0x20e4) 6119#define FF_SLICE_CS_CHICKEN2 _MMIO(0x20e4)
6069#define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8) 6120#define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8)
6121#define GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE (1<<10)
6070 6122
6071#define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec) 6123#define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec)
6124#define GEN9_CTX_PREEMPT_REG _MMIO(0x2248)
6072#define GEN8_CS_CHICKEN1 _MMIO(0x2580) 6125#define GEN8_CS_CHICKEN1 _MMIO(0x2580)
6073 6126
6074/* GEN7 chicken */ 6127/* GEN7 chicken */
@@ -6076,6 +6129,7 @@ enum skl_disp_power_wells {
6076# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26)) 6129# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
6077# define GEN9_RHWO_OPTIMIZATION_DISABLE (1<<14) 6130# define GEN9_RHWO_OPTIMIZATION_DISABLE (1<<14)
6078#define COMMON_SLICE_CHICKEN2 _MMIO(0x7014) 6131#define COMMON_SLICE_CHICKEN2 _MMIO(0x7014)
6132# define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1<<8)
6079# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0) 6133# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0)
6080 6134
6081#define HIZ_CHICKEN _MMIO(0x7018) 6135#define HIZ_CHICKEN _MMIO(0x7018)
@@ -6089,7 +6143,14 @@ enum skl_disp_power_wells {
6089#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000 6143#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
6090 6144
6091#define GEN8_L3SQCREG1 _MMIO(0xB100) 6145#define GEN8_L3SQCREG1 _MMIO(0xB100)
6092#define BDW_WA_L3SQCREG1_DEFAULT 0x784000 6146/*
6147 * Note that on CHV the following has an off-by-one error wrt. BSpec.
6148 * Using the formula in BSpec leads to a hang, while the formula here works
6149 * fine and matches the formulas for all other platforms. A BSpec change
6150 * request has been filed to clarify this.
6151 */
6152#define L3_GENERAL_PRIO_CREDITS(x) (((x) >> 1) << 19)
6153#define L3_HIGH_PRIO_CREDITS(x) (((x) >> 1) << 14)
6093 6154
6094#define GEN7_L3CNTLREG1 _MMIO(0xB01C) 6155#define GEN7_L3CNTLREG1 _MMIO(0xB01C)
6095#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C 6156#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
@@ -6921,6 +6982,7 @@ enum skl_disp_power_wells {
6921#define EDRAM_SETS_IDX(cap) (((cap) >> 8) & 0x3) 6982#define EDRAM_SETS_IDX(cap) (((cap) >> 8) & 0x3)
6922 6983
6923#define GEN6_UCGCTL1 _MMIO(0x9400) 6984#define GEN6_UCGCTL1 _MMIO(0x9400)
6985# define GEN6_GAMUNIT_CLOCK_GATE_DISABLE (1 << 22)
6924# define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE (1 << 16) 6986# define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE (1 << 16)
6925# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5) 6987# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5)
6926# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7) 6988# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7)
@@ -6937,6 +6999,7 @@ enum skl_disp_power_wells {
6937 6999
6938#define GEN7_UCGCTL4 _MMIO(0x940c) 7000#define GEN7_UCGCTL4 _MMIO(0x940c)
6939#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25) 7001#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25)
7002#define GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE (1<<14)
6940 7003
6941#define GEN6_RCGCTL1 _MMIO(0x9410) 7004#define GEN6_RCGCTL1 _MMIO(0x9410)
6942#define GEN6_RCGCTL2 _MMIO(0x9414) 7005#define GEN6_RCGCTL2 _MMIO(0x9414)
@@ -7021,7 +7084,7 @@ enum skl_disp_power_wells {
7021#define VLV_RCEDATA _MMIO(0xA0BC) 7084#define VLV_RCEDATA _MMIO(0xA0BC)
7022#define GEN6_RC6pp_THRESHOLD _MMIO(0xA0C0) 7085#define GEN6_RC6pp_THRESHOLD _MMIO(0xA0C0)
7023#define GEN6_PMINTRMSK _MMIO(0xA168) 7086#define GEN6_PMINTRMSK _MMIO(0xA168)
7024#define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31) 7087#define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31)
7025#define VLV_PWRDWNUPCTL _MMIO(0xA294) 7088#define VLV_PWRDWNUPCTL _MMIO(0xA294)
7026#define GEN9_MEDIA_PG_IDLE_HYSTERESIS _MMIO(0xA0C4) 7089#define GEN9_MEDIA_PG_IDLE_HYSTERESIS _MMIO(0xA0C4)
7027#define GEN9_RENDER_PG_IDLE_HYSTERESIS _MMIO(0xA0C8) 7090#define GEN9_RENDER_PG_IDLE_HYSTERESIS _MMIO(0xA0C8)
@@ -7557,14 +7620,15 @@ enum skl_disp_power_wells {
7557#define CDCLK_FREQ_540 (1<<26) 7620#define CDCLK_FREQ_540 (1<<26)
7558#define CDCLK_FREQ_337_308 (2<<26) 7621#define CDCLK_FREQ_337_308 (2<<26)
7559#define CDCLK_FREQ_675_617 (3<<26) 7622#define CDCLK_FREQ_675_617 (3<<26)
7560#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
7561
7562#define BXT_CDCLK_CD2X_DIV_SEL_MASK (3<<22) 7623#define BXT_CDCLK_CD2X_DIV_SEL_MASK (3<<22)
7563#define BXT_CDCLK_CD2X_DIV_SEL_1 (0<<22) 7624#define BXT_CDCLK_CD2X_DIV_SEL_1 (0<<22)
7564#define BXT_CDCLK_CD2X_DIV_SEL_1_5 (1<<22) 7625#define BXT_CDCLK_CD2X_DIV_SEL_1_5 (1<<22)
7565#define BXT_CDCLK_CD2X_DIV_SEL_2 (2<<22) 7626#define BXT_CDCLK_CD2X_DIV_SEL_2 (2<<22)
7566#define BXT_CDCLK_CD2X_DIV_SEL_4 (3<<22) 7627#define BXT_CDCLK_CD2X_DIV_SEL_4 (3<<22)
7628#define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe)<<20)
7629#define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3)
7567#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16) 7630#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16)
7631#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
7568 7632
7569/* LCPLL_CTL */ 7633/* LCPLL_CTL */
7570#define LCPLL1_CTL _MMIO(0x46010) 7634#define LCPLL1_CTL _MMIO(0x46010)
@@ -8140,6 +8204,8 @@ enum skl_disp_power_wells {
8140#define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c) 8204#define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c)
8141#define _MIPIC_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c) 8205#define _MIPIC_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c)
8142#define MIPI_EOT_DISABLE(port) _MMIO_MIPI(port, _MIPIA_EOT_DISABLE, _MIPIC_EOT_DISABLE) 8206#define MIPI_EOT_DISABLE(port) _MMIO_MIPI(port, _MIPIA_EOT_DISABLE, _MIPIC_EOT_DISABLE)
8207#define BXT_DEFEATURE_DPI_FIFO_CTR (1 << 9)
8208#define BXT_DPHY_DEFEATURE_EN (1 << 8)
8143#define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7) 8209#define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7)
8144#define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6) 8210#define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6)
8145#define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5) 8211#define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5)
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 2d576b7ff299..02507bfc8def 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -43,7 +43,7 @@ static u32 calc_residency(struct drm_device *dev,
43 u64 units = 128ULL, div = 100000ULL; 43 u64 units = 128ULL, div = 100000ULL;
44 u32 ret; 44 u32 ret;
45 45
46 if (!intel_enable_rc6(dev)) 46 if (!intel_enable_rc6())
47 return 0; 47 return 0;
48 48
49 intel_runtime_pm_get(dev_priv); 49 intel_runtime_pm_get(dev_priv);
@@ -70,8 +70,7 @@ static u32 calc_residency(struct drm_device *dev,
70static ssize_t 70static ssize_t
71show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf) 71show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
72{ 72{
73 struct drm_minor *dminor = dev_to_drm_minor(kdev); 73 return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6());
74 return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
75} 74}
76 75
77static ssize_t 76static ssize_t
@@ -204,7 +203,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
204 struct drm_minor *dminor = dev_to_drm_minor(dev); 203 struct drm_minor *dminor = dev_to_drm_minor(dev);
205 struct drm_device *drm_dev = dminor->dev; 204 struct drm_device *drm_dev = dminor->dev;
206 struct drm_i915_private *dev_priv = drm_dev->dev_private; 205 struct drm_i915_private *dev_priv = drm_dev->dev_private;
207 struct intel_context *ctx; 206 struct i915_gem_context *ctx;
208 u32 *temp = NULL; /* Just here to make handling failures easy */ 207 u32 *temp = NULL; /* Just here to make handling failures easy */
209 int slice = (int)(uintptr_t)attr->private; 208 int slice = (int)(uintptr_t)attr->private;
210 int ret; 209 int ret;
@@ -397,7 +396,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
397 /* We still need *_set_rps to process the new max_delay and 396 /* We still need *_set_rps to process the new max_delay and
398 * update the interrupt limits and PMINTRMSK even though 397 * update the interrupt limits and PMINTRMSK even though
399 * frequency request may be unchanged. */ 398 * frequency request may be unchanged. */
400 intel_set_rps(dev, val); 399 intel_set_rps(dev_priv, val);
401 400
402 mutex_unlock(&dev_priv->rps.hw_lock); 401 mutex_unlock(&dev_priv->rps.hw_lock);
403 402
@@ -461,7 +460,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
461 /* We still need *_set_rps to process the new min_delay and 460 /* We still need *_set_rps to process the new min_delay and
462 * update the interrupt limits and PMINTRMSK even though 461 * update the interrupt limits and PMINTRMSK even though
463 * frequency request may be unchanged. */ 462 * frequency request may be unchanged. */
464 intel_set_rps(dev, val); 463 intel_set_rps(dev_priv, val);
465 464
466 mutex_unlock(&dev_priv->rps.hw_lock); 465 mutex_unlock(&dev_priv->rps.hw_lock);
467 466
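
The sysfs changes belong to a refactor that runs through the whole series: helpers that only touch driver-private state now take struct drm_i915_private * directly (and intel_enable_rc6() now takes no argument at all), instead of taking struct drm_device * and re-deriving the private pointer. A self-contained miniature of the pattern, with hypothetical stand-in types:

#include <stdio.h>

struct device_private { int rc6_enabled; };
struct device { struct device_private *priv; };

/* Old shape: take the broad handle, immediately re-derive the private one. */
static int residency_old(struct device *dev)
{
	return dev->priv->rc6_enabled ? 42 : 0;
}

/* New shape: the signature names exactly the data the function uses. */
static int residency_new(struct device_private *priv)
{
	return priv->rc6_enabled ? 42 : 0;
}

int main(void)
{
	struct device_private priv = { .rc6_enabled = 1 };
	struct device dev = { .priv = &priv };

	printf("%d %d\n", residency_old(&dev), residency_new(&priv));
	return 0;
}
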
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index dc0def210097..6768db032f84 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -462,7 +462,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
462 ), 462 ),
463 463
464 TP_fast_assign( 464 TP_fast_assign(
465 __entry->dev = from->dev->primary->index; 465 __entry->dev = from->i915->dev->primary->index;
466 __entry->sync_from = from->id; 466 __entry->sync_from = from->id;
467 __entry->sync_to = to_req->engine->id; 467 __entry->sync_to = to_req->engine->id;
468 __entry->seqno = i915_gem_request_get_seqno(req); 468 __entry->seqno = i915_gem_request_get_seqno(req);
@@ -486,13 +486,11 @@ TRACE_EVENT(i915_gem_ring_dispatch,
486 ), 486 ),
487 487
488 TP_fast_assign( 488 TP_fast_assign(
489 struct intel_engine_cs *engine = 489 __entry->dev = req->i915->dev->primary->index;
490 i915_gem_request_get_engine(req); 490 __entry->ring = req->engine->id;
491 __entry->dev = engine->dev->primary->index; 491 __entry->seqno = req->seqno;
492 __entry->ring = engine->id;
493 __entry->seqno = i915_gem_request_get_seqno(req);
494 __entry->flags = flags; 492 __entry->flags = flags;
495 i915_trace_irq_get(engine, req); 493 i915_trace_irq_get(req->engine, req);
496 ), 494 ),
497 495
498 TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x", 496 TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
@@ -511,7 +509,7 @@ TRACE_EVENT(i915_gem_ring_flush,
511 ), 509 ),
512 510
513 TP_fast_assign( 511 TP_fast_assign(
514 __entry->dev = req->engine->dev->primary->index; 512 __entry->dev = req->i915->dev->primary->index;
515 __entry->ring = req->engine->id; 513 __entry->ring = req->engine->id;
516 __entry->invalidate = invalidate; 514 __entry->invalidate = invalidate;
517 __entry->flush = flush; 515 __entry->flush = flush;
@@ -533,11 +531,9 @@ DECLARE_EVENT_CLASS(i915_gem_request,
533 ), 531 ),
534 532
535 TP_fast_assign( 533 TP_fast_assign(
536 struct intel_engine_cs *engine = 534 __entry->dev = req->i915->dev->primary->index;
537 i915_gem_request_get_engine(req); 535 __entry->ring = req->engine->id;
538 __entry->dev = engine->dev->primary->index; 536 __entry->seqno = req->seqno;
539 __entry->ring = engine->id;
540 __entry->seqno = i915_gem_request_get_seqno(req);
541 ), 537 ),
542 538
543 TP_printk("dev=%u, ring=%u, seqno=%u", 539 TP_printk("dev=%u, ring=%u, seqno=%u",
@@ -560,7 +556,7 @@ TRACE_EVENT(i915_gem_request_notify,
560 ), 556 ),
561 557
562 TP_fast_assign( 558 TP_fast_assign(
563 __entry->dev = engine->dev->primary->index; 559 __entry->dev = engine->i915->dev->primary->index;
564 __entry->ring = engine->id; 560 __entry->ring = engine->id;
565 __entry->seqno = engine->get_seqno(engine); 561 __entry->seqno = engine->get_seqno(engine);
566 ), 562 ),
@@ -597,13 +593,11 @@ TRACE_EVENT(i915_gem_request_wait_begin,
597 * less desirable. 593 * less desirable.
598 */ 594 */
599 TP_fast_assign( 595 TP_fast_assign(
600 struct intel_engine_cs *engine = 596 __entry->dev = req->i915->dev->primary->index;
601 i915_gem_request_get_engine(req); 597 __entry->ring = req->engine->id;
602 __entry->dev = engine->dev->primary->index; 598 __entry->seqno = req->seqno;
603 __entry->ring = engine->id;
604 __entry->seqno = i915_gem_request_get_seqno(req);
605 __entry->blocking = 599 __entry->blocking =
606 mutex_is_locked(&engine->dev->struct_mutex); 600 mutex_is_locked(&req->i915->dev->struct_mutex);
607 ), 601 ),
608 602
609 TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s", 603 TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
@@ -740,12 +734,12 @@ DEFINE_EVENT(i915_ppgtt, i915_ppgtt_release,
740 * the context. 734 * the context.
741 */ 735 */
742DECLARE_EVENT_CLASS(i915_context, 736DECLARE_EVENT_CLASS(i915_context,
743 TP_PROTO(struct intel_context *ctx), 737 TP_PROTO(struct i915_gem_context *ctx),
744 TP_ARGS(ctx), 738 TP_ARGS(ctx),
745 739
746 TP_STRUCT__entry( 740 TP_STRUCT__entry(
747 __field(u32, dev) 741 __field(u32, dev)
748 __field(struct intel_context *, ctx) 742 __field(struct i915_gem_context *, ctx)
749 __field(struct i915_address_space *, vm) 743 __field(struct i915_address_space *, vm)
750 ), 744 ),
751 745
@@ -760,12 +754,12 @@ DECLARE_EVENT_CLASS(i915_context,
760) 754)
761 755
762DEFINE_EVENT(i915_context, i915_context_create, 756DEFINE_EVENT(i915_context, i915_context_create,
763 TP_PROTO(struct intel_context *ctx), 757 TP_PROTO(struct i915_gem_context *ctx),
764 TP_ARGS(ctx) 758 TP_ARGS(ctx)
765); 759);
766 760
767DEFINE_EVENT(i915_context, i915_context_free, 761DEFINE_EVENT(i915_context, i915_context_free,
768 TP_PROTO(struct intel_context *ctx), 762 TP_PROTO(struct i915_gem_context *ctx),
769 TP_ARGS(ctx) 763 TP_ARGS(ctx)
770); 764);
771 765
@@ -777,13 +771,13 @@ DEFINE_EVENT(i915_context, i915_context_free,
777 * called only if full ppgtt is enabled. 771 * called only if full ppgtt is enabled.
778 */ 772 */
779TRACE_EVENT(switch_mm, 773TRACE_EVENT(switch_mm,
780 TP_PROTO(struct intel_engine_cs *engine, struct intel_context *to), 774 TP_PROTO(struct intel_engine_cs *engine, struct i915_gem_context *to),
781 775
782 TP_ARGS(engine, to), 776 TP_ARGS(engine, to),
783 777
784 TP_STRUCT__entry( 778 TP_STRUCT__entry(
785 __field(u32, ring) 779 __field(u32, ring)
786 __field(struct intel_context *, to) 780 __field(struct i915_gem_context *, to)
787 __field(struct i915_address_space *, vm) 781 __field(struct i915_address_space *, vm)
788 __field(u32, dev) 782 __field(u32, dev)
789 ), 783 ),
@@ -792,7 +786,7 @@ TRACE_EVENT(switch_mm,
792 __entry->ring = engine->id; 786 __entry->ring = engine->id;
793 __entry->to = to; 787 __entry->to = to;
794 __entry->vm = to->ppgtt? &to->ppgtt->base : NULL; 788 __entry->vm = to->ppgtt? &to->ppgtt->base : NULL;
795 __entry->dev = engine->dev->primary->index; 789 __entry->dev = engine->i915->dev->primary->index;
796 ), 790 ),
797 791
798 TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p", 792 TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index d02efb8cad4d..f6acb5a0e701 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -53,20 +53,19 @@
53 53
54/** 54/**
55 * i915_check_vgpu - detect virtual GPU 55 * i915_check_vgpu - detect virtual GPU
56 * @dev: drm device * 56 * @dev_priv: i915 device private
57 * 57 *
58 * This function is called at the initialization stage to detect whether 58 * This function is called at the initialization stage to detect whether
59 * the driver is running on a vGPU. 59 * the driver is running on a vGPU.
60 */ 60 */
61void i915_check_vgpu(struct drm_device *dev) 61void i915_check_vgpu(struct drm_i915_private *dev_priv)
62{ 62{
63 struct drm_i915_private *dev_priv = to_i915(dev);
64 uint64_t magic; 63 uint64_t magic;
65 uint32_t version; 64 uint32_t version;
66 65
67 BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE); 66 BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
68 67
69 if (!IS_HASWELL(dev)) 68 if (!IS_HASWELL(dev_priv))
70 return; 69 return;
71 70
72 magic = __raw_i915_read64(dev_priv, vgtif_reg(magic)); 71 magic = __raw_i915_read64(dev_priv, vgtif_reg(magic));
@@ -102,10 +101,13 @@ static struct _balloon_info_ bl_info;
102 * This function is called to deallocate the ballooned-out graphic memory, when 101 * This function is called to deallocate the ballooned-out graphic memory, when
103 * driver is unloaded or when ballooning fails. 102 * driver is unloaded or when ballooning fails.
104 */ 103 */
105void intel_vgt_deballoon(void) 104void intel_vgt_deballoon(struct drm_i915_private *dev_priv)
106{ 105{
107 int i; 106 int i;
108 107
108 if (!intel_vgpu_active(dev_priv))
109 return;
110
109 DRM_DEBUG("VGT deballoon.\n"); 111 DRM_DEBUG("VGT deballoon.\n");
110 112
111 for (i = 0; i < 4; i++) { 113 for (i = 0; i < 4; i++) {
@@ -151,36 +153,35 @@ static int vgt_balloon_space(struct drm_mm *mm,
151 * of its graphic space being zero. Yet some portions are ballooned out 153 * of its graphic space being zero. Yet some portions are ballooned out
152 * (the shadow part, which is marked as reserved by the drm allocator). From the 154 * (the shadow part, which is marked as reserved by the drm allocator). From the
153 * host point of view, the graphic address space is partitioned by multiple 155 * host point of view, the graphic address space is partitioned by multiple
154 * vGPUs in different VMs. 156 * vGPUs in different VMs. ::
155 * 157 *
156 * vGPU1 view Host view 158 * vGPU1 view Host view
157 * 0 ------> +-----------+ +-----------+ 159 * 0 ------> +-----------+ +-----------+
158 * ^ |///////////| | vGPU3 | 160 * ^ |###########| | vGPU3 |
159 * | |///////////| +-----------+ 161 * | |###########| +-----------+
160 * | |///////////| | vGPU2 | 162 * | |###########| | vGPU2 |
161 * | +-----------+ +-----------+ 163 * | +-----------+ +-----------+
162 * mappable GM | available | ==> | vGPU1 | 164 * mappable GM | available | ==> | vGPU1 |
163 * | +-----------+ +-----------+ 165 * | +-----------+ +-----------+
164 * | |///////////| | | 166 * | |###########| | |
165 * v |///////////| | Host | 167 * v |###########| | Host |
166 * +=======+===========+ +===========+ 168 * +=======+===========+ +===========+
167 * ^ |///////////| | vGPU3 | 169 * ^ |###########| | vGPU3 |
168 * | |///////////| +-----------+ 170 * | |###########| +-----------+
169 * | |///////////| | vGPU2 | 171 * | |###########| | vGPU2 |
170 * | +-----------+ +-----------+ 172 * | +-----------+ +-----------+
171 * unmappable GM | available | ==> | vGPU1 | 173 * unmappable GM | available | ==> | vGPU1 |
172 * | +-----------+ +-----------+ 174 * | +-----------+ +-----------+
173 * | |///////////| | | 175 * | |###########| | |
174 * | |///////////| | Host | 176 * | |###########| | Host |
175 * v |///////////| | | 177 * v |###########| | |
176 * total GM size ------> +-----------+ +-----------+ 178 * total GM size ------> +-----------+ +-----------+
177 * 179 *
178 * Returns: 180 * Returns:
179 * zero on success, non-zero if configuration invalid or ballooning failed 181 * zero on success, non-zero if configuration invalid or ballooning failed
180 */ 182 */
181int intel_vgt_balloon(struct drm_device *dev) 183int intel_vgt_balloon(struct drm_i915_private *dev_priv)
182{ 184{
183 struct drm_i915_private *dev_priv = to_i915(dev);
184 struct i915_ggtt *ggtt = &dev_priv->ggtt; 185 struct i915_ggtt *ggtt = &dev_priv->ggtt;
185 unsigned long ggtt_end = ggtt->base.start + ggtt->base.total; 186 unsigned long ggtt_end = ggtt->base.start + ggtt->base.total;
186 187
@@ -188,6 +189,9 @@ int intel_vgt_balloon(struct drm_device *dev)
188 unsigned long unmappable_base, unmappable_size, unmappable_end; 189 unsigned long unmappable_base, unmappable_size, unmappable_end;
189 int ret; 190 int ret;
190 191
192 if (!intel_vgpu_active(dev_priv))
193 return 0;
194
191 mappable_base = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.base)); 195 mappable_base = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.base));
192 mappable_size = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.size)); 196 mappable_size = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.size));
193 unmappable_base = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.base)); 197 unmappable_base = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.base));
@@ -259,6 +263,6 @@ int intel_vgt_balloon(struct drm_device *dev)
259 263
260err: 264err:
261 DRM_ERROR("VGT balloon fail\n"); 265 DRM_ERROR("VGT balloon fail\n");
262 intel_vgt_deballoon(); 266 intel_vgt_deballoon(dev_priv);
263 return ret; 267 return ret;
264} 268}
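
Note how the vgpu-active check moves from the callers into intel_vgt_balloon() and intel_vgt_deballoon() themselves: callers may now invoke the pair unconditionally, and the non-vGPU case collapses to a no-op (a successful one, for balloon). A tiny sketch of this guard-in-the-callee pattern, with invented names:

#include <stdbool.h>
#include <stdio.h>

static bool vgpu_active; /* stand-in for intel_vgpu_active(dev_priv) */

/* The guard lives inside the function, so every caller can call it
 * unconditionally; when no vGPU is present this is simply a no-op. */
static void deballoon(void)
{
	if (!vgpu_active)
		return;
	puts("releasing ballooned ranges");
}

int main(void)
{
	deballoon();            /* silently does nothing */
	vgpu_active = true;
	deballoon();            /* now actually runs */
	return 0;
}
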
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
index 3c83b47b5f69..3c3b2d24e830 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.h
+++ b/drivers/gpu/drm/i915/i915_vgpu.h
@@ -24,94 +24,10 @@
24#ifndef _I915_VGPU_H_ 24#ifndef _I915_VGPU_H_
25#define _I915_VGPU_H_ 25#define _I915_VGPU_H_
26 26
27/* The MMIO offset of the shared info between guest and host emulator */ 27#include "i915_pvinfo.h"
28#define VGT_PVINFO_PAGE 0x78000
29#define VGT_PVINFO_SIZE 0x1000
30 28
31/* 29void i915_check_vgpu(struct drm_i915_private *dev_priv);
32 * The following structure pages are defined in GEN MMIO space 30int intel_vgt_balloon(struct drm_i915_private *dev_priv);
33 * for virtualization. (One page for now) 31void intel_vgt_deballoon(struct drm_i915_private *dev_priv);
34 */
35#define VGT_MAGIC 0x4776544776544776ULL /* 'vGTvGTvG' */
36#define VGT_VERSION_MAJOR 1
37#define VGT_VERSION_MINOR 0
38
39#define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor))
40#define INTEL_VGT_IF_VERSION \
41 INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR)
42
43/*
44 * notifications from guest to vgpu device model
45 */
46enum vgt_g2v_type {
47 VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE = 2,
48 VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY,
49 VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE,
50 VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY,
51 VGT_G2V_EXECLIST_CONTEXT_CREATE,
52 VGT_G2V_EXECLIST_CONTEXT_DESTROY,
53 VGT_G2V_MAX,
54};
55
56struct vgt_if {
57 uint64_t magic; /* VGT_MAGIC */
58 uint16_t version_major;
59 uint16_t version_minor;
60 uint32_t vgt_id; /* ID of vGT instance */
61 uint32_t rsv1[12]; /* pad to offset 0x40 */
62 /*
63 * Data structure to describe the balooning info of resources.
64 * Each VM can only have one portion of continuous area for now.
65 * (May support scattered resource in future)
66 * (starting from offset 0x40)
67 */
68 struct {
69 /* Aperture register ballooning */
70 struct {
71 uint32_t base;
72 uint32_t size;
73 } mappable_gmadr; /* aperture */
74 /* GMADR register ballooning */
75 struct {
76 uint32_t base;
77 uint32_t size;
78 } nonmappable_gmadr; /* non aperture */
79 /* allowed fence registers */
80 uint32_t fence_num;
81 uint32_t rsv2[3];
82 } avail_rs; /* available/assigned resource */
83 uint32_t rsv3[0x200 - 24]; /* pad to half page */
84 /*
85 * The bottom half page is for response from Gfx driver to hypervisor.
86 */
87 uint32_t rsv4;
88 uint32_t display_ready; /* ready for display owner switch */
89
90 uint32_t rsv5[4];
91
92 uint32_t g2v_notify;
93 uint32_t rsv6[7];
94
95 struct {
96 uint32_t lo;
97 uint32_t hi;
98 } pdp[4];
99
100 uint32_t execlist_context_descriptor_lo;
101 uint32_t execlist_context_descriptor_hi;
102
103 uint32_t rsv7[0x200 - 24]; /* pad to one page */
104} __packed;
105
106#define vgtif_reg(x) \
107 _MMIO((VGT_PVINFO_PAGE + (long)&((struct vgt_if *)NULL)->x))
108
109/* vGPU display status to be used by the host side */
110#define VGT_DRV_DISPLAY_NOT_READY 0
111#define VGT_DRV_DISPLAY_READY 1 /* ready for display switch */
112
113extern void i915_check_vgpu(struct drm_device *dev);
114extern int intel_vgt_balloon(struct drm_device *dev);
115extern void intel_vgt_deballoon(void);
116 32
117#endif /* _I915_VGPU_H_ */ 33#endif /* _I915_VGPU_H_ */
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 50ff90aea721..c5a166752eda 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -191,7 +191,7 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
191 191
192 /* plane scaler case: assign as a plane scaler */ 192 /* plane scaler case: assign as a plane scaler */
193 /* find the plane that set the bit as scaler_user */ 193 /* find the plane that set the bit as scaler_user */
194 plane = drm_state->planes[i]; 194 plane = drm_state->planes[i].ptr;
195 195
196 /* 196 /*
197 * to enable/disable hq mode, add planes that are using scaler 197 * to enable/disable hq mode, add planes that are using scaler
@@ -223,7 +223,8 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
223 continue; 223 continue;
224 } 224 }
225 225
226 plane_state = to_intel_plane_state(drm_state->plane_states[i]); 226 plane_state = intel_atomic_get_existing_plane_state(drm_state,
227 intel_plane);
227 scaler_id = &plane_state->scaler_id; 228 scaler_id = &plane_state->scaler_id;
228 } 229 }
229 230
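
The intel_atomic.c hunk tracks a DRM core change: the two parallel arrays that drm_atomic_state used to carry (planes[] and plane_states[]) became a single array whose slots pair the object pointer with its state, hence drm_state->planes[i].ptr. In miniature, with invented types:

#include <stdio.h>

struct plane { int id; };
struct plane_state { int scaler_id; };

/* One slot holds both the object and its state, replacing two
 * parallel arrays that had to be kept in sync by index. */
struct plane_slot {
	struct plane *ptr;
	struct plane_state *state;
};

int main(void)
{
	struct plane pl = { .id = 1 };
	struct plane_state st = { .scaler_id = -1 };
	struct plane_slot planes[1] = { { &pl, &st } };

	printf("plane %d, scaler %d\n",
	       planes[0].ptr->id, planes[0].state->scaler_id);
	return 0;
}
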
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 02a7527ce7bb..b9329c2a670a 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -621,17 +621,11 @@ static void i915_audio_component_codec_wake_override(struct device *dev,
621static int i915_audio_component_get_cdclk_freq(struct device *dev) 621static int i915_audio_component_get_cdclk_freq(struct device *dev)
622{ 622{
623 struct drm_i915_private *dev_priv = dev_to_i915(dev); 623 struct drm_i915_private *dev_priv = dev_to_i915(dev);
624 int ret;
625 624
626 if (WARN_ON_ONCE(!HAS_DDI(dev_priv))) 625 if (WARN_ON_ONCE(!HAS_DDI(dev_priv)))
627 return -ENODEV; 626 return -ENODEV;
628 627
629 intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO); 628 return dev_priv->cdclk_freq;
630 ret = dev_priv->display.get_display_clock_speed(dev_priv->dev);
631
632 intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
633
634 return ret;
635} 629}
636 630
637static int i915_audio_component_sync_audio_rate(struct device *dev, 631static int i915_audio_component_sync_audio_rate(struct device *dev,
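
The audio hunk above replaces a locked hardware query (power-well get, read the display clock, power-well put) with a read of dev_priv->cdclk_freq, a cached value the display code keeps current whenever CDCLK is reprogrammed. The shape of the change in a standalone sketch, with an invented struct:

#include <stdio.h>

struct priv { int cdclk_freq; /* updated wherever CDCLK is reprogrammed */ };

/* The component callback now just returns the cached frequency;
 * no power-well acquisition per query. */
static int get_cdclk_freq(const struct priv *p)
{
	return p->cdclk_freq;
}

int main(void)
{
	struct priv p = { .cdclk_freq = 540000 };

	printf("cdclk: %d kHz\n", get_cdclk_freq(&p));
	return 0;
}
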
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index b235b6e88ead..da5ed4a850b9 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -139,6 +139,11 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
139 else 139 else
140 panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC; 140 panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
141 141
142 panel_fixed_mode->width_mm = (dvo_timing->himage_hi << 8) |
143 dvo_timing->himage_lo;
144 panel_fixed_mode->height_mm = (dvo_timing->vimage_hi << 8) |
145 dvo_timing->vimage_lo;
146
142 /* Some VBTs have bogus h/vtotal values */ 147 /* Some VBTs have bogus h/vtotal values */
143 if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) 148 if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
144 panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1; 149 panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
@@ -213,7 +218,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
213 218
214 dev_priv->vbt.lvds_dither = lvds_options->pixel_dither; 219 dev_priv->vbt.lvds_dither = lvds_options->pixel_dither;
215 220
216 ret = intel_opregion_get_panel_type(dev_priv->dev); 221 ret = intel_opregion_get_panel_type(dev_priv);
217 if (ret >= 0) { 222 if (ret >= 0) {
218 WARN_ON(ret > 0xf); 223 WARN_ON(ret > 0xf);
219 panel_type = ret; 224 panel_type = ret;
@@ -318,6 +323,15 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
318 return; 323 return;
319 } 324 }
320 325
326 dev_priv->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
327 if (bdb->version >= 191 &&
328 get_blocksize(backlight_data) >= sizeof(*backlight_data)) {
329 const struct bdb_lfp_backlight_control_method *method;
330
331 method = &backlight_data->backlight_control[panel_type];
332 dev_priv->vbt.backlight.type = method->type;
333 }
334
321 dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz; 335 dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
322 dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm; 336 dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
323 dev_priv->vbt.backlight.min_brightness = entry->min_brightness; 337 dev_priv->vbt.backlight.min_brightness = entry->min_brightness;
@@ -763,6 +777,16 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
763 return; 777 return;
764 } 778 }
765 779
780 /*
781 * These fields were introduced from VBT version 197 onwards,
782 * so make sure they are set to zero for earlier
783 * versions.
784 */
785 if (dev_priv->vbt.dsi.config->dual_link && bdb->version < 197) {
786 dev_priv->vbt.dsi.config->dl_dcs_cabc_ports = 0;
787 dev_priv->vbt.dsi.config->dl_dcs_backlight_ports = 0;
788 }
789
766 /* We have mandatory mipi config blocks. Initialize as generic panel */ 790 /* We have mandatory mipi config blocks. Initialize as generic panel */
767 dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID; 791 dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
768} 792}
@@ -1187,7 +1211,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
1187 } 1211 }
1188 if (bdb->version < 106) { 1212 if (bdb->version < 106) {
1189 expected_size = 22; 1213 expected_size = 22;
1190 } else if (bdb->version < 109) { 1214 } else if (bdb->version < 111) {
1191 expected_size = 27; 1215 expected_size = 27;
1192 } else if (bdb->version < 195) { 1216 } else if (bdb->version < 195) {
1193 BUILD_BUG_ON(sizeof(struct old_child_dev_config) != 33); 1217 BUILD_BUG_ON(sizeof(struct old_child_dev_config) != 33);
@@ -1546,6 +1570,45 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin)
1546} 1570}
1547 1571
1548/** 1572/**
1573 * intel_bios_is_port_present - is the specified digital port present
1574 * @dev_priv: i915 device instance
1575 * @port: port to check
1576 *
1577 * Return true if the device in %port is present.
1578 */
1579bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port)
1580{
1581 static const struct {
1582 u16 dp, hdmi;
1583 } port_mapping[] = {
1584 [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, },
1585 [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, },
1586 [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
1587 [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
1588 };
1589 int i;
1590
1591 /* FIXME maybe deal with port A as well? */
1592 if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping))
1593 return false;
1594
1595 if (!dev_priv->vbt.child_dev_num)
1596 return false;
1597
1598 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
1599 const union child_device_config *p_child =
1600 &dev_priv->vbt.child_dev[i];
1601 if ((p_child->common.dvo_port == port_mapping[port].dp ||
1602 p_child->common.dvo_port == port_mapping[port].hdmi) &&
1603 (p_child->common.device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING |
1604 DEVICE_TYPE_DISPLAYPORT_OUTPUT)))
1605 return true;
1606 }
1607
1608 return false;
1609}
1610
1611/**
1549 * intel_bios_is_port_edp - is the device in given port eDP 1612 * intel_bios_is_port_edp - is the device in given port eDP
1550 * @dev_priv: i915 device instance 1613 * @dev_priv: i915 device instance
1551 * @port: port to check 1614 * @port: port to check
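
The new intel_bios_is_port_present() shows a common kernel idiom: a const lookup table built with designated initializers and indexed by an enum, so the per-port DVO codes read as data instead of a switch. A compilable miniature (the numeric DVO values below are placeholders, not the real VBT codes):

#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E };

/* Designated initializers keyed by enum; unlisted slots (PORT_A)
 * are zero-initialized. */
static const struct { unsigned short dp, hdmi; } port_mapping[] = {
	[PORT_B] = { 0x07, 0x01 },
	[PORT_C] = { 0x08, 0x02 },
	[PORT_D] = { 0x09, 0x03 },
	[PORT_E] = { 0x0b, 0x04 },
};

int main(void)
{
	enum port p = PORT_C;

	printf("port C: dp=0x%02x hdmi=0x%02x\n",
	       port_mapping[p].dp, port_mapping[p].hdmi);
	return 0;
}
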
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index ab0ea315eddb..8405b5a367d7 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -30,6 +30,14 @@
30#ifndef _INTEL_BIOS_H_ 30#ifndef _INTEL_BIOS_H_
31#define _INTEL_BIOS_H_ 31#define _INTEL_BIOS_H_
32 32
33enum intel_backlight_type {
34 INTEL_BACKLIGHT_PMIC,
35 INTEL_BACKLIGHT_LPSS,
36 INTEL_BACKLIGHT_DISPLAY_DDI,
37 INTEL_BACKLIGHT_DSI_DCS,
38 INTEL_BACKLIGHT_PANEL_DRIVER_INTERFACE,
39};
40
33struct edp_power_seq { 41struct edp_power_seq {
34 u16 t1_t3; 42 u16 t1_t3;
35 u16 t8; 43 u16 t8;
@@ -113,7 +121,13 @@ struct mipi_config {
113 u16 dual_link:2; 121 u16 dual_link:2;
114 u16 lane_cnt:2; 122 u16 lane_cnt:2;
115 u16 pixel_overlap:3; 123 u16 pixel_overlap:3;
116 u16 rsvd3:9; 124 u16 rgb_flip:1;
125#define DL_DCS_PORT_A 0x00
126#define DL_DCS_PORT_C 0x01
127#define DL_DCS_PORT_A_AND_C 0x02
128 u16 dl_dcs_cabc_ports:2;
129 u16 dl_dcs_backlight_ports:2;
130 u16 rsvd3:4;
117 131
118 u16 rsvd4; 132 u16 rsvd4;
119 133
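
The mipi_config change is a careful bitfield split: the old 9-bit rsvd3 becomes rgb_flip:1, two 2-bit dual-link DCS port fields, and a 4-bit remainder. The total width stays at 9 bits, so the struct layout, and with it the binary VBT block the struct mirrors, is unchanged. A sketch demonstrating the size invariant (field names abbreviated; on typical ABIs both structs occupy two bytes):

#include <stdio.h>
#include <stdint.h>

struct cfg_old {
	uint16_t dual_link:2, lane_cnt:2, pixel_overlap:3, rsvd3:9;
};

/* Same 16 bits, with the reserved run carved into named fields. */
struct cfg_new {
	uint16_t dual_link:2, lane_cnt:2, pixel_overlap:3,
		 rgb_flip:1, dl_dcs_cabc_ports:2,
		 dl_dcs_backlight_ports:2, rsvd3:4;
};

int main(void)
{
	printf("old=%zu new=%zu\n",
	       sizeof(struct cfg_old), sizeof(struct cfg_new));
	return 0;
}
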
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index 1b3f97449395..522f5a2de015 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -547,7 +547,8 @@ void intel_color_init(struct drm_crtc *crtc)
547 /* Enable color management support when we have degamma & gamma LUTs. */ 547 /* Enable color management support when we have degamma & gamma LUTs. */
548 if (INTEL_INFO(dev)->color.degamma_lut_size != 0 && 548 if (INTEL_INFO(dev)->color.degamma_lut_size != 0 &&
549 INTEL_INFO(dev)->color.gamma_lut_size != 0) 549 INTEL_INFO(dev)->color.gamma_lut_size != 0)
550 drm_helper_crtc_enable_color_mgmt(crtc, 550 drm_crtc_enable_color_mgmt(crtc,
551 INTEL_INFO(dev)->color.degamma_lut_size, 551 INTEL_INFO(dev)->color.degamma_lut_size,
552 true,
552 INTEL_INFO(dev)->color.gamma_lut_size); 553 INTEL_INFO(dev)->color.gamma_lut_size);
553} 554}
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 3fbb6fc66451..e115bcc6766f 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -743,6 +743,7 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
743 .dpms = drm_atomic_helper_connector_dpms, 743 .dpms = drm_atomic_helper_connector_dpms,
744 .detect = intel_crt_detect, 744 .detect = intel_crt_detect,
745 .fill_modes = drm_helper_probe_single_connector_modes, 745 .fill_modes = drm_helper_probe_single_connector_modes,
746 .early_unregister = intel_connector_unregister,
746 .destroy = intel_crt_destroy, 747 .destroy = intel_crt_destroy,
747 .set_property = intel_crt_set_property, 748 .set_property = intel_crt_set_property,
748 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 749 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -753,7 +754,6 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
753static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = { 754static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
754 .mode_valid = intel_crt_mode_valid, 755 .mode_valid = intel_crt_mode_valid,
755 .get_modes = intel_crt_get_modes, 756 .get_modes = intel_crt_get_modes,
756 .best_encoder = intel_best_encoder,
757}; 757};
758 758
759static const struct drm_encoder_funcs intel_crt_enc_funcs = { 759static const struct drm_encoder_funcs intel_crt_enc_funcs = {
@@ -839,7 +839,7 @@ void intel_crt_init(struct drm_device *dev)
839 &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); 839 &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
840 840
841 drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs, 841 drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs,
842 DRM_MODE_ENCODER_DAC, NULL); 842 DRM_MODE_ENCODER_DAC, "CRT");
843 843
844 intel_connector_attach_encoder(intel_connector, &crt->base); 844 intel_connector_attach_encoder(intel_connector, &crt->base);
845 845
@@ -876,7 +876,6 @@ void intel_crt_init(struct drm_device *dev)
876 crt->base.get_hw_state = intel_crt_get_hw_state; 876 crt->base.get_hw_state = intel_crt_get_hw_state;
877 } 877 }
878 intel_connector->get_hw_state = intel_connector_get_hw_state; 878 intel_connector->get_hw_state = intel_connector_get_hw_state;
879 intel_connector->unregister = intel_connector_unregister;
880 879
881 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); 880 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
882 881
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index a34c23eceba0..2b3b428d9cd2 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -41,16 +41,22 @@
41 * be moved to FW_FAILED. 41 * be moved to FW_FAILED.
42 */ 42 */
43 43
44#define I915_CSR_KBL "i915/kbl_dmc_ver1.bin"
45MODULE_FIRMWARE(I915_CSR_KBL);
46#define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 1)
47
44#define I915_CSR_SKL "i915/skl_dmc_ver1.bin" 48#define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
49MODULE_FIRMWARE(I915_CSR_SKL);
50#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23)
51
45#define I915_CSR_BXT "i915/bxt_dmc_ver1.bin" 52#define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
53MODULE_FIRMWARE(I915_CSR_BXT);
54#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
46 55
47#define FIRMWARE_URL "https://01.org/linuxgraphics/intel-linux-graphics-firmwares" 56#define FIRMWARE_URL "https://01.org/linuxgraphics/intel-linux-graphics-firmwares"
48 57
49MODULE_FIRMWARE(I915_CSR_SKL);
50MODULE_FIRMWARE(I915_CSR_BXT);
51 58
52#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23) 59
53#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
54 60
55#define CSR_MAX_FW_SIZE 0x2FFF 61#define CSR_MAX_FW_SIZE 0x2FFF
56#define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF 62#define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF
@@ -169,12 +175,10 @@ struct stepping_info {
169 char substepping; 175 char substepping;
170}; 176};
171 177
172/*
173 * Kabylake derived from Skylake H0, so SKL H0
174 * is the right firmware for KBL A0 (revid 0).
175 */
176static const struct stepping_info kbl_stepping_info[] = { 178static const struct stepping_info kbl_stepping_info[] = {
177 {'H', '0'}, {'I', '0'} 179 {'A', '0'}, {'B', '0'}, {'C', '0'},
180 {'D', '0'}, {'E', '0'}, {'F', '0'},
181 {'G', '0'}, {'H', '0'}, {'I', '0'},
178}; 182};
179 183
180static const struct stepping_info skl_stepping_info[] = { 184static const struct stepping_info skl_stepping_info[] = {
@@ -298,7 +302,9 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
298 302
299 csr->version = css_header->version; 303 csr->version = css_header->version;
300 304
301 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { 305 if (IS_KABYLAKE(dev_priv)) {
306 required_min_version = KBL_CSR_VERSION_REQUIRED;
307 } else if (IS_SKYLAKE(dev_priv)) {
302 required_min_version = SKL_CSR_VERSION_REQUIRED; 308 required_min_version = SKL_CSR_VERSION_REQUIRED;
303 } else if (IS_BROXTON(dev_priv)) { 309 } else if (IS_BROXTON(dev_priv)) {
304 required_min_version = BXT_CSR_VERSION_REQUIRED; 310 required_min_version = BXT_CSR_VERSION_REQUIRED;
@@ -446,7 +452,9 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
446 if (!HAS_CSR(dev_priv)) 452 if (!HAS_CSR(dev_priv))
447 return; 453 return;
448 454
449 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) 455 if (IS_KABYLAKE(dev_priv))
456 csr->fw_path = I915_CSR_KBL;
457 else if (IS_SKYLAKE(dev_priv))
450 csr->fw_path = I915_CSR_SKL; 458 csr->fw_path = I915_CSR_SKL;
451 else if (IS_BROXTON(dev_priv)) 459 else if (IS_BROXTON(dev_priv))
452 csr->fw_path = I915_CSR_BXT; 460 csr->fw_path = I915_CSR_BXT;
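
After this change each platform pins its own DMC firmware and minimum version; Kabylake stops riding on the Skylake blob and gets i915/kbl_dmc_ver1.bin with a 1.1 floor. The selection logic condensed into a standalone sketch (paths and versions copied from the hunk above; the helper itself is invented):

#include <stdio.h>

struct fw { const char *path; int major, minor; };

/* Mirrors the if/else chains in intel_csr_ucode_init() and parse_csr_fw(). */
static struct fw pick_fw(int is_kbl, int is_skl, int is_bxt)
{
	if (is_kbl)
		return (struct fw){ "i915/kbl_dmc_ver1.bin", 1, 1 };
	if (is_skl)
		return (struct fw){ "i915/skl_dmc_ver1.bin", 1, 23 };
	if (is_bxt)
		return (struct fw){ "i915/bxt_dmc_ver1.bin", 1, 7 };
	return (struct fw){ NULL, 0, 0 };
}

int main(void)
{
	struct fw f = pick_fw(1, 0, 0);

	printf("%s (needs >= %d.%d)\n", f.path, f.major, f.minor);
	return 0;
}
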
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 01e523df363b..ad3b0ee5e55b 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -948,7 +948,7 @@ static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
948{ 948{
949 struct intel_shared_dpll *pll; 949 struct intel_shared_dpll *pll;
950 struct intel_dpll_hw_state *state; 950 struct intel_dpll_hw_state *state;
951 intel_clock_t clock; 951 struct dpll clock;
952 952
953 /* For DDI ports we always use a shared PLL. */ 953 /* For DDI ports we always use a shared PLL. */
954 if (WARN_ON(dpll == DPLL_ID_PRIVATE)) 954 if (WARN_ON(dpll == DPLL_ID_PRIVATE))
@@ -1342,6 +1342,14 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
1342 DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port)); 1342 DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port));
1343 1343
1344out: 1344out:
1345 if (ret && IS_BROXTON(dev_priv)) {
1346 tmp = I915_READ(BXT_PHY_CTL(port));
1347 if ((tmp & (BXT_PHY_LANE_POWERDOWN_ACK |
1348 BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED)
1349 DRM_ERROR("Port %c enabled but PHY powered down? "
1350 "(PHY_CTL %08x)\n", port_name(port), tmp);
1351 }
1352
1345 intel_display_power_put(dev_priv, power_domain); 1353 intel_display_power_put(dev_priv, power_domain);
1346 1354
1347 return ret; 1355 return ret;
@@ -1742,9 +1750,11 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
1742 } 1750 }
1743} 1751}
1744 1752
1745static bool broxton_phy_is_enabled(struct drm_i915_private *dev_priv, 1753bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
1746 enum dpio_phy phy) 1754 enum dpio_phy phy)
1747{ 1755{
1756 enum port port;
1757
1748 if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & GT_DISPLAY_POWER_ON(phy))) 1758 if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & GT_DISPLAY_POWER_ON(phy)))
1749 return false; 1759 return false;
1750 1760
@@ -1770,38 +1780,48 @@ static bool broxton_phy_is_enabled(struct drm_i915_private *dev_priv,
1770 return false; 1780 return false;
1771 } 1781 }
1772 1782
1783 for_each_port_masked(port,
1784 phy == DPIO_PHY0 ? BIT(PORT_B) | BIT(PORT_C) :
1785 BIT(PORT_A)) {
1786 u32 tmp = I915_READ(BXT_PHY_CTL(port));
1787
1788 if (tmp & BXT_PHY_CMNLANE_POWERDOWN_ACK) {
1789 DRM_DEBUG_DRIVER("DDI PHY %d powered, but common lane "
1790 "for port %c powered down "
1791 "(PHY_CTL %08x)\n",
1792 phy, port_name(port), tmp);
1793
1794 return false;
1795 }
1796 }
1797
1773 return true; 1798 return true;
1774} 1799}
1775 1800
1776static u32 broxton_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy) 1801static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
1777{ 1802{
1778 u32 val = I915_READ(BXT_PORT_REF_DW6(phy)); 1803 u32 val = I915_READ(BXT_PORT_REF_DW6(phy));
1779 1804
1780 return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT; 1805 return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
1781} 1806}
1782 1807
1783static void broxton_phy_wait_grc_done(struct drm_i915_private *dev_priv, 1808static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
1784 enum dpio_phy phy) 1809 enum dpio_phy phy)
1785{ 1810{
1786 if (wait_for(I915_READ(BXT_PORT_REF_DW3(phy)) & GRC_DONE, 10)) 1811 if (wait_for(I915_READ(BXT_PORT_REF_DW3(phy)) & GRC_DONE, 10))
1787 DRM_ERROR("timeout waiting for PHY%d GRC\n", phy); 1812 DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
1788} 1813}
1789 1814
1790static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv, 1815void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
1791 enum dpio_phy phy);
1792
1793static void broxton_phy_init(struct drm_i915_private *dev_priv,
1794 enum dpio_phy phy)
1795{ 1816{
1796 enum port port; 1817 u32 val;
1797 u32 ports, val;
1798 1818
1799 if (broxton_phy_is_enabled(dev_priv, phy)) { 1819 if (bxt_ddi_phy_is_enabled(dev_priv, phy)) {
1800 /* Still read out the GRC value for state verification */ 1820 /* Still read out the GRC value for state verification */
1801 if (phy == DPIO_PHY0) 1821 if (phy == DPIO_PHY0)
1802 dev_priv->bxt_phy_grc = broxton_get_grc(dev_priv, phy); 1822 dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, phy);
1803 1823
1804 if (broxton_phy_verify_state(dev_priv, phy)) { 1824 if (bxt_ddi_phy_verify_state(dev_priv, phy)) {
1805 DRM_DEBUG_DRIVER("DDI PHY %d already enabled, " 1825 DRM_DEBUG_DRIVER("DDI PHY %d already enabled, "
1806 "won't reprogram it\n", phy); 1826 "won't reprogram it\n", phy);
1807 1827
@@ -1810,8 +1830,6 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
1810 1830
1811 DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, " 1831 DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, "
1812 "force reprogramming it\n", phy); 1832 "force reprogramming it\n", phy);
1813 } else {
1814 DRM_DEBUG_DRIVER("DDI PHY %d not enabled, enabling it\n", phy);
1815 } 1833 }
1816 1834
1817 val = I915_READ(BXT_P_CR_GT_DISP_PWRON); 1835 val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
@@ -1831,28 +1849,6 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
1831 DRM_ERROR("timeout during PHY%d power on\n", phy); 1849 DRM_ERROR("timeout during PHY%d power on\n", phy);
1832 } 1850 }
1833 1851
1834 if (phy == DPIO_PHY0)
1835 ports = BIT(PORT_B) | BIT(PORT_C);
1836 else
1837 ports = BIT(PORT_A);
1838
1839 for_each_port_masked(port, ports) {
1840 int lane;
1841
1842 for (lane = 0; lane < 4; lane++) {
1843 val = I915_READ(BXT_PORT_TX_DW14_LN(port, lane));
1844 /*
1845 * Note that on CHV this flag is called UPAR, but has
1846 * the same function.
1847 */
1848 val &= ~LATENCY_OPTIM;
1849 if (lane != 1)
1850 val |= LATENCY_OPTIM;
1851
1852 I915_WRITE(BXT_PORT_TX_DW14_LN(port, lane), val);
1853 }
1854 }
1855
1856 /* Program PLL Rcomp code offset */ 1852 /* Program PLL Rcomp code offset */
1857 val = I915_READ(BXT_PORT_CL1CM_DW9(phy)); 1853 val = I915_READ(BXT_PORT_CL1CM_DW9(phy));
1858 val &= ~IREF0RC_OFFSET_MASK; 1854 val &= ~IREF0RC_OFFSET_MASK;
@@ -1899,10 +1895,7 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
1899 * the corresponding calibrated value from PHY1, and disable 1895 * the corresponding calibrated value from PHY1, and disable
1900 * the automatic calibration on PHY0. 1896 * the automatic calibration on PHY0.
1901 */ 1897 */
1902 broxton_phy_wait_grc_done(dev_priv, DPIO_PHY1); 1898 val = dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, DPIO_PHY1);
1903
1904 val = dev_priv->bxt_phy_grc = broxton_get_grc(dev_priv,
1905 DPIO_PHY1);
1906 grc_code = val << GRC_CODE_FAST_SHIFT | 1899 grc_code = val << GRC_CODE_FAST_SHIFT |
1907 val << GRC_CODE_SLOW_SHIFT | 1900 val << GRC_CODE_SLOW_SHIFT |
1908 val; 1901 val;
@@ -1912,31 +1905,16 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
1912 val |= GRC_DIS | GRC_RDY_OVRD; 1905 val |= GRC_DIS | GRC_RDY_OVRD;
1913 I915_WRITE(BXT_PORT_REF_DW8(DPIO_PHY0), val); 1906 I915_WRITE(BXT_PORT_REF_DW8(DPIO_PHY0), val);
1914 } 1907 }
1915 /*
1916 * During PHY1 init delay waiting for GRC calibration to finish, since
1917 * it can happen in parallel with the subsequent PHY0 init.
1918 */
1919 1908
1920 val = I915_READ(BXT_PHY_CTL_FAMILY(phy)); 1909 val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
1921 val |= COMMON_RESET_DIS; 1910 val |= COMMON_RESET_DIS;
1922 I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val); 1911 I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
1923}
1924 1912
1925void broxton_ddi_phy_init(struct drm_i915_private *dev_priv) 1913 if (phy == DPIO_PHY1)
1926{ 1914 bxt_phy_wait_grc_done(dev_priv, DPIO_PHY1);
1927 /* Enable PHY1 first since it provides Rcomp for PHY0 */
1928 broxton_phy_init(dev_priv, DPIO_PHY1);
1929 broxton_phy_init(dev_priv, DPIO_PHY0);
1930
1931 /*
1932 * If BIOS enabled only PHY0 and not PHY1, we skipped waiting for the
1933 * PHY1 GRC calibration to finish, so wait for it here.
1934 */
1935 broxton_phy_wait_grc_done(dev_priv, DPIO_PHY1);
1936} 1915}
1937 1916
1938static void broxton_phy_uninit(struct drm_i915_private *dev_priv, 1917void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
1939 enum dpio_phy phy)
1940{ 1918{
1941 uint32_t val; 1919 uint32_t val;
1942 1920
@@ -1949,12 +1927,6 @@ static void broxton_phy_uninit(struct drm_i915_private *dev_priv,
1949 I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val); 1927 I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
1950} 1928}
1951 1929
1952void broxton_ddi_phy_uninit(struct drm_i915_private *dev_priv)
1953{
1954 broxton_phy_uninit(dev_priv, DPIO_PHY1);
1955 broxton_phy_uninit(dev_priv, DPIO_PHY0);
1956}
1957
1958static bool __printf(6, 7) 1930static bool __printf(6, 7)
1959__phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy, 1931__phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1960 i915_reg_t reg, u32 mask, u32 expected, 1932 i915_reg_t reg, u32 mask, u32 expected,
@@ -1982,11 +1954,9 @@ __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1982 return false; 1954 return false;
1983} 1955}
1984 1956
1985static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv, 1957bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
1986 enum dpio_phy phy) 1958 enum dpio_phy phy)
1987{ 1959{
1988 enum port port;
1989 u32 ports;
1990 uint32_t mask; 1960 uint32_t mask;
1991 bool ok; 1961 bool ok;
1992 1962
@@ -1994,27 +1964,11 @@ static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv,
1994 __phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \ 1964 __phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \
1995 ## __VA_ARGS__) 1965 ## __VA_ARGS__)
1996 1966
1997 /* We expect the PHY to be always enabled */ 1967 if (!bxt_ddi_phy_is_enabled(dev_priv, phy))
1998 if (!broxton_phy_is_enabled(dev_priv, phy))
1999 return false; 1968 return false;
2000 1969
2001 ok = true; 1970 ok = true;
2002 1971
2003 if (phy == DPIO_PHY0)
2004 ports = BIT(PORT_B) | BIT(PORT_C);
2005 else
2006 ports = BIT(PORT_A);
2007
2008 for_each_port_masked(port, ports) {
2009 int lane;
2010
2011 for (lane = 0; lane < 4; lane++)
2012 ok &= _CHK(BXT_PORT_TX_DW14_LN(port, lane),
2013 LATENCY_OPTIM,
2014 lane != 1 ? LATENCY_OPTIM : 0,
2015 "BXT_PORT_TX_DW14_LN(%d, %d)", port, lane);
2016 }
2017
2018 /* PLL Rcomp code offset */ 1972 /* PLL Rcomp code offset */
2019 ok &= _CHK(BXT_PORT_CL1CM_DW9(phy), 1973 ok &= _CHK(BXT_PORT_CL1CM_DW9(phy),
2020 IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT, 1974 IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT,
@@ -2058,11 +2012,65 @@ static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv,
2058#undef _CHK 2012#undef _CHK
2059} 2013}
2060 2014
2061void broxton_ddi_phy_verify_state(struct drm_i915_private *dev_priv) 2015static uint8_t
2016bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
2017 struct intel_crtc_state *pipe_config)
2062{ 2018{
2063 if (!broxton_phy_verify_state(dev_priv, DPIO_PHY0) || 2019 switch (pipe_config->lane_count) {
2064 !broxton_phy_verify_state(dev_priv, DPIO_PHY1)) 2020 case 1:
2065 i915_report_error(dev_priv, "DDI PHY state mismatch\n"); 2021 return 0;
2022 case 2:
2023 return BIT(2) | BIT(0);
2024 case 4:
2025 return BIT(3) | BIT(2) | BIT(0);
2026 default:
2027 MISSING_CASE(pipe_config->lane_count);
2028
2029 return 0;
2030 }
2031}
2032
2033static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder)
2034{
2035 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2036 struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
2037 enum port port = dport->port;
2038 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2039 int lane;
2040
2041 for (lane = 0; lane < 4; lane++) {
2042 u32 val = I915_READ(BXT_PORT_TX_DW14_LN(port, lane));
2043
2044 /*
2045 * Note that on CHV this flag is called UPAR, but has
2046 * the same function.
2047 */
2048 val &= ~LATENCY_OPTIM;
2049 if (intel_crtc->config->lane_lat_optim_mask & BIT(lane))
2050 val |= LATENCY_OPTIM;
2051
2052 I915_WRITE(BXT_PORT_TX_DW14_LN(port, lane), val);
2053 }
2054}
2055
2056static uint8_t
2057bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
2058{
2059 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2060 struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
2061 enum port port = dport->port;
2062 int lane;
2063 uint8_t mask;
2064
2065 mask = 0;
2066 for (lane = 0; lane < 4; lane++) {
2067 u32 val = I915_READ(BXT_PORT_TX_DW14_LN(port, lane));
2068
2069 if (val & LATENCY_OPTIM)
2070 mask |= BIT(lane);
2071 }
2072
2073 return mask;
2066} 2074}
2067 2075
2068void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) 2076void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
@@ -2236,13 +2244,19 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
2236 } 2244 }
2237 2245
2238 intel_ddi_clock_get(encoder, pipe_config); 2246 intel_ddi_clock_get(encoder, pipe_config);
2247
2248 if (IS_BROXTON(dev_priv))
2249 pipe_config->lane_lat_optim_mask =
2250 bxt_ddi_phy_get_lane_lat_optim_mask(encoder);
2239} 2251}
2240 2252
2241static bool intel_ddi_compute_config(struct intel_encoder *encoder, 2253static bool intel_ddi_compute_config(struct intel_encoder *encoder,
2242 struct intel_crtc_state *pipe_config) 2254 struct intel_crtc_state *pipe_config)
2243{ 2255{
2256 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
2244 int type = encoder->type; 2257 int type = encoder->type;
2245 int port = intel_ddi_get_encoder_port(encoder); 2258 int port = intel_ddi_get_encoder_port(encoder);
2259 int ret;
2246 2260
2247 WARN(type == INTEL_OUTPUT_UNKNOWN, "compute_config() on unknown output!\n"); 2261 WARN(type == INTEL_OUTPUT_UNKNOWN, "compute_config() on unknown output!\n");
2248 2262
@@ -2250,9 +2264,17 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
2250 pipe_config->cpu_transcoder = TRANSCODER_EDP; 2264 pipe_config->cpu_transcoder = TRANSCODER_EDP;
2251 2265
2252 if (type == INTEL_OUTPUT_HDMI) 2266 if (type == INTEL_OUTPUT_HDMI)
2253 return intel_hdmi_compute_config(encoder, pipe_config); 2267 ret = intel_hdmi_compute_config(encoder, pipe_config);
2254 else 2268 else
2255 return intel_dp_compute_config(encoder, pipe_config); 2269 ret = intel_dp_compute_config(encoder, pipe_config);
2270
2271 if (IS_BROXTON(dev_priv) && ret)
2272 pipe_config->lane_lat_optim_mask =
2273 bxt_ddi_phy_calc_lane_lat_optim_mask(encoder,
2274 pipe_config);
2275
2276 return ret;
2277
2256} 2278}
2257 2279
2258static const struct drm_encoder_funcs intel_ddi_funcs = { 2280static const struct drm_encoder_funcs intel_ddi_funcs = {
@@ -2347,10 +2369,12 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
2347 encoder = &intel_encoder->base; 2369 encoder = &intel_encoder->base;
2348 2370
2349 drm_encoder_init(dev, encoder, &intel_ddi_funcs, 2371 drm_encoder_init(dev, encoder, &intel_ddi_funcs,
2350 DRM_MODE_ENCODER_TMDS, NULL); 2372 DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port));
2351 2373
2352 intel_encoder->compute_config = intel_ddi_compute_config; 2374 intel_encoder->compute_config = intel_ddi_compute_config;
2353 intel_encoder->enable = intel_enable_ddi; 2375 intel_encoder->enable = intel_enable_ddi;
2376 if (IS_BROXTON(dev_priv))
2377 intel_encoder->pre_pll_enable = bxt_ddi_pre_pll_enable;
2354 intel_encoder->pre_enable = intel_ddi_pre_enable; 2378 intel_encoder->pre_enable = intel_ddi_pre_enable;
2355 intel_encoder->disable = intel_disable_ddi; 2379 intel_encoder->disable = intel_disable_ddi;
2356 intel_encoder->post_disable = intel_ddi_post_disable; 2380 intel_encoder->post_disable = intel_ddi_post_disable;
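
The lane-latency rework above replaces the old hardcoded "every lane except lane 1" programming with a per-CRTC mask: computed from the lane count in compute_config, applied in the new bxt_ddi_pre_pll_enable() hook, and read back for state checking. The mapping itself is small enough to lift out as-is:

#include <stdint.h>
#include <stdio.h>

/* Same lane_count -> mask table as bxt_ddi_phy_calc_lane_lat_optim_mask()
 * in the hunk above; bit n set means lane n gets LATENCY_OPTIM. */
static uint8_t lane_lat_optim_mask(int lane_count)
{
	switch (lane_count) {
	case 1:
		return 0;
	case 2:
		return (1u << 2) | (1u << 0);
	case 4:
		return (1u << 3) | (1u << 2) | (1u << 0);
	default:
		return 0;
	}
}

int main(void)
{
	for (int n = 1; n <= 4; n *= 2)
		printf("lanes=%d mask=0x%x\n", n, lane_lat_optim_mask(n));
	return 0;
}
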
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 2113f401f0ba..0b2cd669ac05 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -36,6 +36,7 @@
36#include "intel_drv.h" 36#include "intel_drv.h"
37#include <drm/i915_drm.h> 37#include <drm/i915_drm.h>
38#include "i915_drv.h" 38#include "i915_drv.h"
39#include "i915_gem_dmabuf.h"
39#include "intel_dsi.h" 40#include "intel_dsi.h"
40#include "i915_trace.h" 41#include "i915_trace.h"
41#include <drm/drm_atomic.h> 42#include <drm/drm_atomic.h>
@@ -46,7 +47,11 @@
46#include <drm/drm_rect.h> 47#include <drm/drm_rect.h>
47#include <linux/dma_remapping.h> 48#include <linux/dma_remapping.h>
48#include <linux/reservation.h> 49#include <linux/reservation.h>
49#include <linux/dma-buf.h> 50
51static bool is_mmio_work(struct intel_flip_work *work)
52{
53 return work->mmio_work.func;
54}
50 55
51/* Primary plane formats for gen <= 3 */ 56/* Primary plane formats for gen <= 3 */
52static const uint32_t i8xx_primary_formats[] = { 57static const uint32_t i8xx_primary_formats[] = {
@@ -117,20 +122,18 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
117static void ironlake_pfit_enable(struct intel_crtc *crtc); 122static void ironlake_pfit_enable(struct intel_crtc *crtc);
118static void intel_modeset_setup_hw_state(struct drm_device *dev); 123static void intel_modeset_setup_hw_state(struct drm_device *dev);
119static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc); 124static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
125static int ilk_max_pixel_rate(struct drm_atomic_state *state);
126static int bxt_calc_cdclk(int max_pixclk);
120 127
121typedef struct {
122 int min, max;
123} intel_range_t;
124
125typedef struct {
126 int dot_limit;
127 int p2_slow, p2_fast;
128} intel_p2_t;
129
130typedef struct intel_limit intel_limit_t;
131struct intel_limit { 128struct intel_limit {
132 intel_range_t dot, vco, n, m, m1, m2, p, p1; 129 struct {
133 intel_p2_t p2; 130 int min, max;
131 } dot, vco, n, m, m1, m2, p, p1;
132
133 struct {
134 int dot_limit;
135 int p2_slow, p2_fast;
136 } p2;
134}; 137};
135 138
136/* returns HPLL frequency in kHz */ 139/* returns HPLL frequency in kHz */
@@ -185,6 +188,7 @@ intel_pch_rawclk(struct drm_i915_private *dev_priv)
185static int 188static int
186intel_vlv_hrawclk(struct drm_i915_private *dev_priv) 189intel_vlv_hrawclk(struct drm_i915_private *dev_priv)
187{ 190{
191 /* RAWCLK_FREQ_VLV register updated from power well code */
188 return vlv_get_cck_clock_hpll(dev_priv, "hrawclk", 192 return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
189 CCK_DISPLAY_REF_CLOCK_CONTROL); 193 CCK_DISPLAY_REF_CLOCK_CONTROL);
190} 194}
@@ -218,7 +222,7 @@ intel_g4x_hrawclk(struct drm_i915_private *dev_priv)
218 } 222 }
219} 223}
220 224
221static void intel_update_rawclk(struct drm_i915_private *dev_priv) 225void intel_update_rawclk(struct drm_i915_private *dev_priv)
222{ 226{
223 if (HAS_PCH_SPLIT(dev_priv)) 227 if (HAS_PCH_SPLIT(dev_priv))
224 dev_priv->rawclk_freq = intel_pch_rawclk(dev_priv); 228 dev_priv->rawclk_freq = intel_pch_rawclk(dev_priv);
@@ -255,7 +259,7 @@ intel_fdi_link_freq(struct drm_i915_private *dev_priv,
255 return 270000; 259 return 270000;
256} 260}
257 261
258static const intel_limit_t intel_limits_i8xx_dac = { 262static const struct intel_limit intel_limits_i8xx_dac = {
259 .dot = { .min = 25000, .max = 350000 }, 263 .dot = { .min = 25000, .max = 350000 },
260 .vco = { .min = 908000, .max = 1512000 }, 264 .vco = { .min = 908000, .max = 1512000 },
261 .n = { .min = 2, .max = 16 }, 265 .n = { .min = 2, .max = 16 },
@@ -268,7 +272,7 @@ static const intel_limit_t intel_limits_i8xx_dac = {
268 .p2_slow = 4, .p2_fast = 2 }, 272 .p2_slow = 4, .p2_fast = 2 },
269}; 273};
270 274
271static const intel_limit_t intel_limits_i8xx_dvo = { 275static const struct intel_limit intel_limits_i8xx_dvo = {
272 .dot = { .min = 25000, .max = 350000 }, 276 .dot = { .min = 25000, .max = 350000 },
273 .vco = { .min = 908000, .max = 1512000 }, 277 .vco = { .min = 908000, .max = 1512000 },
274 .n = { .min = 2, .max = 16 }, 278 .n = { .min = 2, .max = 16 },
@@ -281,7 +285,7 @@ static const intel_limit_t intel_limits_i8xx_dvo = {
281 .p2_slow = 4, .p2_fast = 4 }, 285 .p2_slow = 4, .p2_fast = 4 },
282}; 286};
283 287
284static const intel_limit_t intel_limits_i8xx_lvds = { 288static const struct intel_limit intel_limits_i8xx_lvds = {
 	.dot = { .min = 25000, .max = 350000 },
 	.vco = { .min = 908000, .max = 1512000 },
 	.n = { .min = 2, .max = 16 },
@@ -294,7 +298,7 @@ static const intel_limit_t intel_limits_i8xx_lvds = {
 		.p2_slow = 14, .p2_fast = 7 },
 };

-static const intel_limit_t intel_limits_i9xx_sdvo = {
+static const struct intel_limit intel_limits_i9xx_sdvo = {
 	.dot = { .min = 20000, .max = 400000 },
 	.vco = { .min = 1400000, .max = 2800000 },
 	.n = { .min = 1, .max = 6 },
@@ -307,7 +311,7 @@ static const intel_limit_t intel_limits_i9xx_sdvo = {
 		.p2_slow = 10, .p2_fast = 5 },
 };

-static const intel_limit_t intel_limits_i9xx_lvds = {
+static const struct intel_limit intel_limits_i9xx_lvds = {
 	.dot = { .min = 20000, .max = 400000 },
 	.vco = { .min = 1400000, .max = 2800000 },
 	.n = { .min = 1, .max = 6 },
@@ -321,7 +325,7 @@ static const intel_limit_t intel_limits_i9xx_lvds = {
 };


-static const intel_limit_t intel_limits_g4x_sdvo = {
+static const struct intel_limit intel_limits_g4x_sdvo = {
 	.dot = { .min = 25000, .max = 270000 },
 	.vco = { .min = 1750000, .max = 3500000},
 	.n = { .min = 1, .max = 4 },
@@ -336,7 +340,7 @@ static const intel_limit_t intel_limits_g4x_sdvo = {
 	},
 };

-static const intel_limit_t intel_limits_g4x_hdmi = {
+static const struct intel_limit intel_limits_g4x_hdmi = {
 	.dot = { .min = 22000, .max = 400000 },
 	.vco = { .min = 1750000, .max = 3500000},
 	.n = { .min = 1, .max = 4 },
@@ -349,7 +353,7 @@ static const intel_limit_t intel_limits_g4x_hdmi = {
 		.p2_slow = 10, .p2_fast = 5 },
 };

-static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
+static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
 	.dot = { .min = 20000, .max = 115000 },
 	.vco = { .min = 1750000, .max = 3500000 },
 	.n = { .min = 1, .max = 3 },
@@ -363,7 +367,7 @@ static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
 	},
 };

-static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
+static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
 	.dot = { .min = 80000, .max = 224000 },
 	.vco = { .min = 1750000, .max = 3500000 },
 	.n = { .min = 1, .max = 3 },
@@ -377,7 +381,7 @@ static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
 	},
 };

-static const intel_limit_t intel_limits_pineview_sdvo = {
+static const struct intel_limit intel_limits_pineview_sdvo = {
 	.dot = { .min = 20000, .max = 400000},
 	.vco = { .min = 1700000, .max = 3500000 },
 	/* Pineview's Ncounter is a ring counter */
@@ -392,7 +396,7 @@ static const intel_limit_t intel_limits_pineview_sdvo = {
 		.p2_slow = 10, .p2_fast = 5 },
 };

-static const intel_limit_t intel_limits_pineview_lvds = {
+static const struct intel_limit intel_limits_pineview_lvds = {
 	.dot = { .min = 20000, .max = 400000 },
 	.vco = { .min = 1700000, .max = 3500000 },
 	.n = { .min = 3, .max = 6 },
@@ -410,7 +414,7 @@ static const intel_limit_t intel_limits_pineview_lvds = {
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
-static const intel_limit_t intel_limits_ironlake_dac = {
+static const struct intel_limit intel_limits_ironlake_dac = {
 	.dot = { .min = 25000, .max = 350000 },
 	.vco = { .min = 1760000, .max = 3510000 },
 	.n = { .min = 1, .max = 5 },
@@ -423,7 +427,7 @@ static const intel_limit_t intel_limits_ironlake_dac = {
 		.p2_slow = 10, .p2_fast = 5 },
 };

-static const intel_limit_t intel_limits_ironlake_single_lvds = {
+static const struct intel_limit intel_limits_ironlake_single_lvds = {
 	.dot = { .min = 25000, .max = 350000 },
 	.vco = { .min = 1760000, .max = 3510000 },
 	.n = { .min = 1, .max = 3 },
@@ -436,7 +440,7 @@ static const intel_limit_t intel_limits_ironlake_single_lvds = {
 		.p2_slow = 14, .p2_fast = 14 },
 };

-static const intel_limit_t intel_limits_ironlake_dual_lvds = {
+static const struct intel_limit intel_limits_ironlake_dual_lvds = {
 	.dot = { .min = 25000, .max = 350000 },
 	.vco = { .min = 1760000, .max = 3510000 },
 	.n = { .min = 1, .max = 3 },
@@ -450,7 +454,7 @@ static const intel_limit_t intel_limits_ironlake_dual_lvds = {
 };

 /* LVDS 100mhz refclk limits. */
-static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
+static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
 	.dot = { .min = 25000, .max = 350000 },
 	.vco = { .min = 1760000, .max = 3510000 },
 	.n = { .min = 1, .max = 2 },
@@ -463,7 +467,7 @@ static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
 		.p2_slow = 14, .p2_fast = 14 },
 };

-static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
+static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
 	.dot = { .min = 25000, .max = 350000 },
 	.vco = { .min = 1760000, .max = 3510000 },
 	.n = { .min = 1, .max = 3 },
@@ -476,7 +480,7 @@ static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
 		.p2_slow = 7, .p2_fast = 7 },
 };

-static const intel_limit_t intel_limits_vlv = {
+static const struct intel_limit intel_limits_vlv = {
 	/*
 	 * These are the data rate limits (measured in fast clocks)
 	 * since those are the strictest limits we have. The fast
@@ -492,7 +496,7 @@ static const intel_limit_t intel_limits_vlv = {
 	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
 };

-static const intel_limit_t intel_limits_chv = {
+static const struct intel_limit intel_limits_chv = {
 	/*
 	 * These are the data rate limits (measured in fast clocks)
 	 * since those are the strictest limits we have. The fast
@@ -508,7 +512,7 @@ static const intel_limit_t intel_limits_chv = {
 	.p2 = { .p2_slow = 1, .p2_fast = 14 },
 };

-static const intel_limit_t intel_limits_bxt = {
+static const struct intel_limit intel_limits_bxt = {
 	/* FIXME: find real dot limits */
 	.dot = { .min = 0, .max = INT_MAX },
 	.vco = { .min = 4800000, .max = 6700000 },
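Note on the pattern repeated through the hunks above: the commit drops the driver-local intel_limit_t and intel_clock_t typedefs in favour of plain struct tags, in line with the kernel style rule against typedef'ing structs. A rough field sketch, inferred from the initializers in this diff rather than copied from the i915 headers:

/* Illustrative only -- shapes inferred from the initializers above,
 * not quoted from i915 headers. */
struct intel_range {
	int min, max;
};

struct intel_p2 {
	int dot_limit;		/* assumed: threshold picking slow vs fast */
	int p2_slow, p2_fast;
};

struct intel_limit {
	struct intel_range dot, vco, n, m, m1, m2, p, p1;
	struct intel_p2 p2;
};

struct dpll {			/* formerly intel_clock_t */
	int n, m1, m2, p1, p2;	/* raw divider settings */
	int m, p, vco, dot;	/* derived, filled by *_calc_dpll_params() */
};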
@@ -581,7 +585,7 @@ static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
 * divided-down version of it.
 */
 /* m1 is reserved as 0 in Pineview, n is a ring counter */
-static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock)
+static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
 {
 	clock->m = clock->m2 + 2;
 	clock->p = clock->p1 * clock->p2;
@@ -598,7 +602,7 @@ static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
 	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
 }

-static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
+static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
 {
 	clock->m = i9xx_dpll_compute_m(clock);
 	clock->p = clock->p1 * clock->p2;
@@ -610,7 +614,7 @@ static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
 	return clock->dot;
 }

-static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
+static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
 {
 	clock->m = clock->m1 * clock->m2;
 	clock->p = clock->p1 * clock->p2;
@@ -622,7 +626,7 @@ static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
 	return clock->dot / 5;
 }

-int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
+int chv_calc_dpll_params(int refclk, struct dpll *clock)
 {
 	clock->m = clock->m1 * clock->m2;
 	clock->p = clock->p1 * clock->p2;
@@ -642,8 +646,8 @@ int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
 */

 static bool intel_PLL_is_valid(struct drm_device *dev,
-			       const intel_limit_t *limit,
-			       const intel_clock_t *clock)
+			       const struct intel_limit *limit,
+			       const struct dpll *clock)
 {
 	if (clock->n < limit->n.min || limit->n.max < clock->n)
 		INTELPllInvalid("n out of range\n");
@@ -678,7 +682,7 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
 }

 static int
-i9xx_select_p2_div(const intel_limit_t *limit,
+i9xx_select_p2_div(const struct intel_limit *limit,
 		   const struct intel_crtc_state *crtc_state,
 		   int target)
 {
@@ -713,13 +717,13 @@ i9xx_select_p2_div(const intel_limit_t *limit,
 * divider from @match_clock used for LVDS downclocking.
 */
 static bool
-i9xx_find_best_dpll(const intel_limit_t *limit,
+i9xx_find_best_dpll(const struct intel_limit *limit,
 		    struct intel_crtc_state *crtc_state,
-		    int target, int refclk, intel_clock_t *match_clock,
-		    intel_clock_t *best_clock)
+		    int target, int refclk, struct dpll *match_clock,
+		    struct dpll *best_clock)
 {
 	struct drm_device *dev = crtc_state->base.crtc->dev;
-	intel_clock_t clock;
+	struct dpll clock;
 	int err = target;

 	memset(best_clock, 0, sizeof(*best_clock));
@@ -770,13 +774,13 @@ i9xx_find_best_dpll(const intel_limit_t *limit,
 * divider from @match_clock used for LVDS downclocking.
 */
 static bool
-pnv_find_best_dpll(const intel_limit_t *limit,
+pnv_find_best_dpll(const struct intel_limit *limit,
 		   struct intel_crtc_state *crtc_state,
-		   int target, int refclk, intel_clock_t *match_clock,
-		   intel_clock_t *best_clock)
+		   int target, int refclk, struct dpll *match_clock,
+		   struct dpll *best_clock)
 {
 	struct drm_device *dev = crtc_state->base.crtc->dev;
-	intel_clock_t clock;
+	struct dpll clock;
 	int err = target;

 	memset(best_clock, 0, sizeof(*best_clock));
@@ -825,13 +829,13 @@ pnv_find_best_dpll(const intel_limit_t *limit,
 * divider from @match_clock used for LVDS downclocking.
 */
 static bool
-g4x_find_best_dpll(const intel_limit_t *limit,
+g4x_find_best_dpll(const struct intel_limit *limit,
 		   struct intel_crtc_state *crtc_state,
-		   int target, int refclk, intel_clock_t *match_clock,
-		   intel_clock_t *best_clock)
+		   int target, int refclk, struct dpll *match_clock,
+		   struct dpll *best_clock)
 {
 	struct drm_device *dev = crtc_state->base.crtc->dev;
-	intel_clock_t clock;
+	struct dpll clock;
 	int max_n;
 	bool found = false;
 	/* approximately equals target * 0.00585 */
@@ -877,8 +881,8 @@ g4x_find_best_dpll(const intel_limit_t *limit,
 * best configuration and error found so far. Return the calculated error.
 */
 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
-			       const intel_clock_t *calculated_clock,
-			       const intel_clock_t *best_clock,
+			       const struct dpll *calculated_clock,
+			       const struct dpll *best_clock,
 			       unsigned int best_error_ppm,
 			       unsigned int *error_ppm)
 {
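vlv_PLL_is_optimal() compares candidates by their deviation from the target frequency in parts per million. A minimal sketch of that scoring, assuming the usual |actual - target| * 10^6 / target definition:

#include <stdio.h>

static unsigned int clock_error_ppm(int target_khz, int actual_khz)
{
	long long delta = (long long)actual_khz - target_khz;

	if (delta < 0)
		delta = -delta;
	return (unsigned int)(delta * 1000000 / target_khz);
}

int main(void)
{
	/* 148.5 MHz target, candidate lands 7 kHz high -> ~47 ppm */
	printf("%u ppm\n", clock_error_ppm(148500, 148507));
	return 0;
}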
@@ -918,14 +922,14 @@ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
 static bool
-vlv_find_best_dpll(const intel_limit_t *limit,
+vlv_find_best_dpll(const struct intel_limit *limit,
 		   struct intel_crtc_state *crtc_state,
-		   int target, int refclk, intel_clock_t *match_clock,
-		   intel_clock_t *best_clock)
+		   int target, int refclk, struct dpll *match_clock,
+		   struct dpll *best_clock)
 {
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct drm_device *dev = crtc->base.dev;
-	intel_clock_t clock;
+	struct dpll clock;
 	unsigned int bestppm = 1000000;
 	/* min update 19.2 MHz */
 	int max_n = min(limit->n.max, refclk / 19200);
@@ -977,15 +981,15 @@ vlv_find_best_dpll(const intel_limit_t *limit,
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
 static bool
-chv_find_best_dpll(const intel_limit_t *limit,
+chv_find_best_dpll(const struct intel_limit *limit,
 		   struct intel_crtc_state *crtc_state,
-		   int target, int refclk, intel_clock_t *match_clock,
-		   intel_clock_t *best_clock)
+		   int target, int refclk, struct dpll *match_clock,
+		   struct dpll *best_clock)
 {
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct drm_device *dev = crtc->base.dev;
 	unsigned int best_error_ppm;
-	intel_clock_t clock;
+	struct dpll clock;
 	uint64_t m2;
 	int found = false;

@@ -1035,10 +1039,10 @@ chv_find_best_dpll(const intel_limit_t *limit,
 }

 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
-			intel_clock_t *best_clock)
+			struct dpll *best_clock)
 {
 	int refclk = 100000;
-	const intel_limit_t *limit = &intel_limits_bxt;
+	const struct intel_limit *limit = &intel_limits_bxt;

 	return chv_find_best_dpll(limit, crtc_state,
 				  target_clock, refclk, NULL, best_clock);
@@ -1203,7 +1207,7 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
 	u32 val;

 	/* ILK FDI PLL is always enabled */
-	if (INTEL_INFO(dev_priv)->gen == 5)
+	if (IS_GEN5(dev_priv))
 		return;

 	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
@@ -2309,7 +2313,7 @@ err_pm:
 	return ret;
 }

-static void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
+void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
 {
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct i915_ggtt_view view;
@@ -3110,17 +3114,12 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	return -ENODEV;
 }

-static void intel_complete_page_flips(struct drm_device *dev)
+static void intel_complete_page_flips(struct drm_i915_private *dev_priv)
 {
-	struct drm_crtc *crtc;
-
-	for_each_crtc(dev, crtc) {
-		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-		enum plane plane = intel_crtc->plane;
+	struct intel_crtc *crtc;

-		intel_prepare_page_flip(dev, plane);
-		intel_finish_page_flip_plane(dev, plane);
-	}
+	for_each_intel_crtc(dev_priv->dev, crtc)
+		intel_finish_page_flip_cs(dev_priv, crtc->pipe);
 }

 static void intel_update_primary_planes(struct drm_device *dev)
@@ -3143,41 +3142,39 @@ static void intel_update_primary_planes(struct drm_device *dev)
 	}
 }

-void intel_prepare_reset(struct drm_device *dev)
+void intel_prepare_reset(struct drm_i915_private *dev_priv)
 {
 	/* no reset support for gen2 */
-	if (IS_GEN2(dev))
+	if (IS_GEN2(dev_priv))
 		return;

 	/* reset doesn't touch the display */
-	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
 		return;

-	drm_modeset_lock_all(dev);
+	drm_modeset_lock_all(dev_priv->dev);
 	/*
 	 * Disabling the crtcs gracefully seems nicer. Also the
 	 * g33 docs say we should at least disable all the planes.
 	 */
-	intel_display_suspend(dev);
+	intel_display_suspend(dev_priv->dev);
 }

-void intel_finish_reset(struct drm_device *dev)
+void intel_finish_reset(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
 	/*
 	 * Flips in the rings will be nuked by the reset,
 	 * so complete all pending flips so that user space
 	 * will get its events and not get stuck.
 	 */
-	intel_complete_page_flips(dev);
+	intel_complete_page_flips(dev_priv);

 	/* no reset support for gen2 */
-	if (IS_GEN2(dev))
+	if (IS_GEN2(dev_priv))
 		return;

 	/* reset doesn't touch the display */
-	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
+	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
 		/*
 		 * Flips in the rings have been nuked by the reset,
 		 * so update the base address of all primary
@@ -3187,7 +3184,7 @@ void intel_finish_reset(struct drm_device *dev)
 	 * FIXME: Atomic will make this obsolete since we won't schedule
 	 * CS-based flips (which might get lost in gpu resets) any more.
 	 */
-		intel_update_primary_planes(dev);
+		intel_update_primary_planes(dev_priv->dev);
 		return;
 	}

@@ -3198,18 +3195,18 @@ void intel_finish_reset(struct drm_device *dev)
 	intel_runtime_pm_disable_interrupts(dev_priv);
 	intel_runtime_pm_enable_interrupts(dev_priv);

-	intel_modeset_init_hw(dev);
+	intel_modeset_init_hw(dev_priv->dev);

 	spin_lock_irq(&dev_priv->irq_lock);
 	if (dev_priv->display.hpd_irq_setup)
-		dev_priv->display.hpd_irq_setup(dev);
+		dev_priv->display.hpd_irq_setup(dev_priv);
 	spin_unlock_irq(&dev_priv->irq_lock);

-	intel_display_resume(dev);
+	intel_display_resume(dev_priv->dev);

 	intel_hpd_init(dev_priv);

-	drm_modeset_unlock_all(dev);
+	drm_modeset_unlock_all(dev_priv->dev);
 }

 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
@@ -3224,7 +3221,7 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
 		return false;

 	spin_lock_irq(&dev->event_lock);
-	pending = to_intel_crtc(crtc)->unpin_work != NULL;
+	pending = to_intel_crtc(crtc)->flip_work != NULL;
 	spin_unlock_irq(&dev->event_lock);

 	return pending;
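The unpin_work to flip_work rename starting here also drops the smp_rmb() further below: the pointer is now only published, read and cleared under dev->event_lock, so the lock itself provides the ordering. A userspace analogue of that discipline (a pthread mutex standing in for the spinlock; none of the i915 types are real here):

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct flip_work { int dummy; };

static pthread_mutex_t event_lock = PTHREAD_MUTEX_INITIALIZER;
static struct flip_work *flip_work;	/* guarded by event_lock */

static bool flip_pending(void)
{
	bool pending;

	pthread_mutex_lock(&event_lock);
	pending = flip_work != NULL;	/* consistent snapshot under lock */
	pthread_mutex_unlock(&event_lock);
	return pending;
}

int main(void)
{
	static struct flip_work work;

	pthread_mutex_lock(&event_lock);
	flip_work = &work;		/* "queue" a flip under the lock */
	pthread_mutex_unlock(&event_lock);

	return flip_pending() ? 0 : 1;
}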
@@ -3803,7 +3800,7 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev)
 		if (atomic_read(&crtc->unpin_work_count) == 0)
 			continue;

-		if (crtc->unpin_work)
+		if (crtc->flip_work)
 			intel_wait_for_vblank(dev, crtc->pipe);

 		return true;
@@ -3815,11 +3812,9 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev)
 static void page_flip_completed(struct intel_crtc *intel_crtc)
 {
 	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
-	struct intel_unpin_work *work = intel_crtc->unpin_work;
+	struct intel_flip_work *work = intel_crtc->flip_work;

-	/* ensure that the unpin work is consistent wrt ->pending. */
-	smp_rmb();
-	intel_crtc->unpin_work = NULL;
+	intel_crtc->flip_work = NULL;

 	if (work->event)
 		drm_crtc_send_vblank_event(&intel_crtc->base, work->event);
@@ -3827,7 +3822,7 @@ static void page_flip_completed(struct intel_crtc *intel_crtc)
 	drm_crtc_vblank_put(&intel_crtc->base);

 	wake_up_all(&dev_priv->pending_flip_queue);
-	queue_work(dev_priv->wq, &work->work);
+	queue_work(dev_priv->wq, &work->unpin_work);

 	trace_i915_flip_complete(intel_crtc->plane,
 				 work->pending_flip_obj);
@@ -3851,9 +3846,11 @@ static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)

 	if (ret == 0) {
 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+		struct intel_flip_work *work;

 		spin_lock_irq(&dev->event_lock);
-		if (intel_crtc->unpin_work) {
+		work = intel_crtc->flip_work;
+		if (work && !is_mmio_work(work)) {
 			WARN_ONCE(1, "Removing stuck page flip\n");
 			page_flip_completed(intel_crtc);
 		}
@@ -4281,8 +4278,9 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
 	struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
 	const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;

-	DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n",
-		      intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
+	DRM_DEBUG_KMS("Updating scaler for [CRTC:%d:%s] scaler_user index %u.%u\n",
+		      intel_crtc->base.base.id, intel_crtc->base.name,
+		      intel_crtc->pipe, SKL_CRTC_INDEX);

 	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
 		&state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
@@ -4312,9 +4310,9 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,

 	bool force_detach = !fb || !plane_state->visible;

-	DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n",
-		      intel_plane->base.base.id, intel_crtc->pipe,
-		      drm_plane_index(&intel_plane->base));
+	DRM_DEBUG_KMS("Updating scaler for [PLANE:%d:%s] scaler_user index %u.%u\n",
+		      intel_plane->base.base.id, intel_plane->base.name,
+		      intel_crtc->pipe, drm_plane_index(&intel_plane->base));

 	ret = skl_update_scaler(crtc_state, force_detach,
 				drm_plane_index(&intel_plane->base),
@@ -4330,8 +4328,9 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,

 	/* check colorkey */
 	if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
-		DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed",
-			      intel_plane->base.base.id);
+		DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
+			      intel_plane->base.base.id,
+			      intel_plane->base.name);
 		return -EINVAL;
 	}

@@ -4350,8 +4349,9 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
 	case DRM_FORMAT_VYUY:
 		break;
 	default:
-		DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n",
-			      intel_plane->base.base.id, fb->base.id, fb->pixel_format);
+		DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
+			      intel_plane->base.base.id, intel_plane->base.name,
+			      fb->base.id, fb->pixel_format);
 		return -EINVAL;
 	}

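These scaler hunks follow the same logging convention change as the rest of the commit: DRM object references grow from [CRTC:%d]/[PLANE:%d] to id-plus-name form. A toy illustration of the resulting output (hypothetical names and values, printf standing in for DRM_DEBUG_KMS):

#include <stdio.h>

int main(void)
{
	int id = 21;
	const char *name = "pipe A";	/* hypothetical CRTC name */

	/* old style: numeric object id only */
	printf("[CRTC:%d] scaler_user index 0.31\n", id);
	/* new style from this patch: id plus human-readable name */
	printf("[CRTC:%d:%s] scaler_user index 0.31\n", id, name);
	return 0;
}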
@@ -4641,14 +4641,14 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
 		struct intel_plane_state *old_primary_state =
 			to_intel_plane_state(old_pri_state);

-		intel_fbc_pre_update(crtc);
+		intel_fbc_pre_update(crtc, pipe_config, primary_state);

 		if (old_primary_state->visible &&
 		    (modeset || !primary_state->visible))
 			intel_pre_disable_primary(&crtc->base);
 	}

-	if (pipe_config->disable_cxsr) {
+	if (pipe_config->disable_cxsr && HAS_GMCH_DISPLAY(dev)) {
 		crtc->wm.cxsr_allowed = false;

 		/*
@@ -4841,6 +4841,10 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
 						      false);

+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		if (encoder->pre_pll_enable)
+			encoder->pre_pll_enable(encoder);
+
 	if (intel_crtc->config->shared_dpll)
 		intel_enable_shared_dpll(intel_crtc);

@@ -5269,21 +5273,34 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
 	return max_cdclk_freq*90/100;
 }

+static int skl_calc_cdclk(int max_pixclk, int vco);
+
 static void intel_update_max_cdclk(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;

 	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
 		u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
+		int max_cdclk, vco;

+		vco = dev_priv->skl_preferred_vco_freq;
+		WARN_ON(vco != 8100000 && vco != 8640000);
+
+		/*
+		 * Use the lower (vco 8640) cdclk values as a
+		 * first guess. skl_calc_cdclk() will correct it
+		 * if the preferred vco is 8100 instead.
+		 */
 		if (limit == SKL_DFSM_CDCLK_LIMIT_675)
-			dev_priv->max_cdclk_freq = 675000;
+			max_cdclk = 617143;
 		else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
-			dev_priv->max_cdclk_freq = 540000;
+			max_cdclk = 540000;
 		else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
-			dev_priv->max_cdclk_freq = 450000;
+			max_cdclk = 432000;
 		else
-			dev_priv->max_cdclk_freq = 337500;
+			max_cdclk = 308571;
+
+		dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
 	} else if (IS_BROXTON(dev)) {
 		dev_priv->max_cdclk_freq = 624000;
 	} else if (IS_BROADWELL(dev)) {
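The new max-cdclk values are exact divisions of the two possible DPLL0 VCOs, which is also why the rounded 308570/617140 kHz entries elsewhere in this commit become 308571/617143: they are 8640000/28 and 8640000/14 to the nearest kHz. A quick standalone check of all eight SKL steps:

#include <stdio.h>

int main(void)
{
	static const struct { int vco_khz, div; } steps[] = {
		{ 8640000, 28 }, { 8640000, 20 }, { 8640000, 16 }, { 8640000, 14 },
		{ 8100000, 24 }, { 8100000, 18 }, { 8100000, 15 }, { 8100000, 12 },
	};
	unsigned int i;

	/* prints 308571, 432000, 540000, 617143, 337500, 450000, 540000, 675000 */
	for (i = 0; i < sizeof(steps) / sizeof(steps[0]); i++)
		printf("vco %d kHz / %d = %d kHz\n", steps[i].vco_khz,
		       steps[i].div,
		       (steps[i].vco_khz + steps[i].div / 2) / steps[i].div);
	return 0;
}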
@@ -5324,264 +5341,313 @@ static void intel_update_cdclk(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;

 	dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
-	DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
-			 dev_priv->cdclk_freq);
+
+	if (INTEL_GEN(dev_priv) >= 9)
+		DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz, VCO: %d kHz, ref: %d kHz\n",
+				 dev_priv->cdclk_freq, dev_priv->cdclk_pll.vco,
+				 dev_priv->cdclk_pll.ref);
+	else
+		DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
+				 dev_priv->cdclk_freq);

 	/*
-	 * Program the gmbus_freq based on the cdclk frequency.
-	 * BSpec erroneously claims we should aim for 4MHz, but
-	 * in fact 1MHz is the correct frequency.
+	 * 9:0 CMBUS [sic] CDCLK frequency (cdfreq):
+	 * Programmng [sic] note: bit[9:2] should be programmed to the number
+	 * of cdclk that generates 4MHz reference clock freq which is used to
+	 * generate GMBus clock. This will vary with the cdclk freq.
 	 */
-	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
-		/*
-		 * Program the gmbus_freq based on the cdclk frequency.
-		 * BSpec erroneously claims we should aim for 4MHz, but
-		 * in fact 1MHz is the correct frequency.
-		 */
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
-	}
+}

-	if (dev_priv->max_cdclk_freq == 0)
-		intel_update_max_cdclk(dev);
+/* convert from kHz to .1 fixpoint MHz with -1MHz offset */
+static int skl_cdclk_decimal(int cdclk)
+{
+	return DIV_ROUND_CLOSEST(cdclk - 1000, 500);
 }

-static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int frequency)
+static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
 {
-	uint32_t divider;
-	uint32_t ratio;
-	uint32_t current_freq;
-	int ret;
+	int ratio;
+
+	if (cdclk == dev_priv->cdclk_pll.ref)
+		return 0;

-	/* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */
-	switch (frequency) {
+	switch (cdclk) {
+	default:
+		MISSING_CASE(cdclk);
 	case 144000:
+	case 288000:
+	case 384000:
+	case 576000:
+		ratio = 60;
+		break;
+	case 624000:
+		ratio = 65;
+		break;
+	}
+
+	return dev_priv->cdclk_pll.ref * ratio;
+}
+
+static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
+{
+	I915_WRITE(BXT_DE_PLL_ENABLE, 0);
+
+	/* Timeout 200us */
+	if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) == 0, 1))
+		DRM_ERROR("timeout waiting for DE PLL unlock\n");
+
+	dev_priv->cdclk_pll.vco = 0;
+}
+
+static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
+{
+	int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk_pll.ref);
+	u32 val;
+
+	val = I915_READ(BXT_DE_PLL_CTL);
+	val &= ~BXT_DE_PLL_RATIO_MASK;
+	val |= BXT_DE_PLL_RATIO(ratio);
+	I915_WRITE(BXT_DE_PLL_CTL, val);
+
+	I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
+
+	/* Timeout 200us */
+	if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) != 0, 1))
+		DRM_ERROR("timeout waiting for DE PLL lock\n");
+
+	dev_priv->cdclk_pll.vco = vco;
+}
+
+static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
+{
+	u32 val, divider;
+	int vco, ret;
+
+	vco = bxt_de_pll_vco(dev_priv, cdclk);
+
+	DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);
+
+	/* cdclk = vco / 2 / div{1,1.5,2,4} */
+	switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
+	case 8:
 		divider = BXT_CDCLK_CD2X_DIV_SEL_4;
-		ratio = BXT_DE_PLL_RATIO(60);
 		break;
-	case 288000:
+	case 4:
 		divider = BXT_CDCLK_CD2X_DIV_SEL_2;
-		ratio = BXT_DE_PLL_RATIO(60);
 		break;
-	case 384000:
+	case 3:
 		divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
-		ratio = BXT_DE_PLL_RATIO(60);
 		break;
-	case 576000:
-		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
-		ratio = BXT_DE_PLL_RATIO(60);
-		break;
-	case 624000:
+	case 2:
 		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
-		ratio = BXT_DE_PLL_RATIO(65);
-		break;
-	case 19200:
-		/*
-		 * Bypass frequency with DE PLL disabled. Init ratio, divider
-		 * to suppress GCC warning.
-		 */
-		ratio = 0;
-		divider = 0;
 		break;
 	default:
-		DRM_ERROR("unsupported CDCLK freq %d", frequency);
+		WARN_ON(cdclk != dev_priv->cdclk_pll.ref);
+		WARN_ON(vco != 0);

-		return;
+		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
+		break;
 	}

-	mutex_lock(&dev_priv->rps.hw_lock);
 	/* Inform power controller of upcoming frequency change */
+	mutex_lock(&dev_priv->rps.hw_lock);
 	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
 				      0x80000000);
 	mutex_unlock(&dev_priv->rps.hw_lock);

 	if (ret) {
 		DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
-			  ret, frequency);
+			  ret, cdclk);
 		return;
 	}

-	current_freq = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK;
-	/* convert from .1 fixpoint MHz with -1MHz offset to kHz */
-	current_freq = current_freq * 500 + 1000;
+	if (dev_priv->cdclk_pll.vco != 0 &&
+	    dev_priv->cdclk_pll.vco != vco)
+		bxt_de_pll_disable(dev_priv);

-	/*
-	 * DE PLL has to be disabled when
-	 * - setting to 19.2MHz (bypass, PLL isn't used)
-	 * - before setting to 624MHz (PLL needs toggling)
-	 * - before setting to any frequency from 624MHz (PLL needs toggling)
-	 */
-	if (frequency == 19200 || frequency == 624000 ||
-	    current_freq == 624000) {
-		I915_WRITE(BXT_DE_PLL_ENABLE, ~BXT_DE_PLL_PLL_ENABLE);
-		/* Timeout 200us */
-		if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK),
-			     1))
-			DRM_ERROR("timout waiting for DE PLL unlock\n");
-	}
-
-	if (frequency != 19200) {
-		uint32_t val;
-
-		val = I915_READ(BXT_DE_PLL_CTL);
-		val &= ~BXT_DE_PLL_RATIO_MASK;
-		val |= ratio;
-		I915_WRITE(BXT_DE_PLL_CTL, val);
-
-		I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
-		/* Timeout 200us */
-		if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1))
-			DRM_ERROR("timeout waiting for DE PLL lock\n");
-
-		val = I915_READ(CDCLK_CTL);
-		val &= ~BXT_CDCLK_CD2X_DIV_SEL_MASK;
-		val |= divider;
-		/*
-		 * Disable SSA Precharge when CD clock frequency < 500 MHz,
-		 * enable otherwise.
-		 */
-		val &= ~BXT_CDCLK_SSA_PRECHARGE_ENABLE;
-		if (frequency >= 500000)
-			val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
+	if (dev_priv->cdclk_pll.vco != vco)
+		bxt_de_pll_enable(dev_priv, vco);

-		val &= ~CDCLK_FREQ_DECIMAL_MASK;
-		/* convert from kHz to .1 fixpoint MHz with -1MHz offset */
-		val |= (frequency - 1000) / 500;
-		I915_WRITE(CDCLK_CTL, val);
-	}
+	val = divider | skl_cdclk_decimal(cdclk);
+	/*
+	 * FIXME if only the cd2x divider needs changing, it could be done
+	 * without shutting off the pipe (if only one pipe is active).
+	 */
+	val |= BXT_CDCLK_CD2X_PIPE_NONE;
+	/*
+	 * Disable SSA Precharge when CD clock frequency < 500 MHz,
+	 * enable otherwise.
+	 */
+	if (cdclk >= 500000)
+		val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
+	I915_WRITE(CDCLK_CTL, val);

 	mutex_lock(&dev_priv->rps.hw_lock);
 	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
-				      DIV_ROUND_UP(frequency, 25000));
+				      DIV_ROUND_UP(cdclk, 25000));
 	mutex_unlock(&dev_priv->rps.hw_lock);

 	if (ret) {
 		DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
-			  ret, frequency);
+			  ret, cdclk);
 		return;
 	}

 	intel_update_cdclk(dev_priv->dev);
 }

-static bool broxton_cdclk_is_enabled(struct drm_i915_private *dev_priv)
+static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
 {
-	if (!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE))
-		return false;
+	u32 cdctl, expected;

-	/* TODO: Check for a valid CDCLK rate */
+	intel_update_cdclk(dev_priv->dev);

-	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_REQUEST)) {
-		DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power not requested\n");
+	if (dev_priv->cdclk_pll.vco == 0 ||
+	    dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
+		goto sanitize;

-		return false;
-	}
+	/* DPLL okay; verify the cdclock
+	 *
+	 * Some BIOS versions leave an incorrect decimal frequency value and
+	 * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4,
+	 * so sanitize this register.
+	 */
+	cdctl = I915_READ(CDCLK_CTL);
+	/*
+	 * Let's ignore the pipe field, since BIOS could have configured the
+	 * dividers both synching to an active pipe, or asynchronously
+	 * (PIPE_NONE).
+	 */
+	cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE;

-	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) {
-		DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power hasn't settled\n");
+	expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) |
+		   skl_cdclk_decimal(dev_priv->cdclk_freq);
+	/*
+	 * Disable SSA Precharge when CD clock frequency < 500 MHz,
+	 * enable otherwise.
+	 */
+	if (dev_priv->cdclk_freq >= 500000)
+		expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;

-		return false;
-	}
+	if (cdctl == expected)
+		/* All well; nothing to sanitize */
+		return;

-	return true;
-}
+sanitize:
+	DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");

-bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv)
-{
-	return broxton_cdclk_is_enabled(dev_priv);
+	/* force cdclk programming */
+	dev_priv->cdclk_freq = 0;
+
+	/* force full PLL disable + enable */
+	dev_priv->cdclk_pll.vco = -1;
 }

-void broxton_init_cdclk(struct drm_i915_private *dev_priv)
+void bxt_init_cdclk(struct drm_i915_private *dev_priv)
 {
-	/* check if cd clock is enabled */
-	if (broxton_cdclk_is_enabled(dev_priv)) {
-		DRM_DEBUG_KMS("CDCLK already enabled, won't reprogram it\n");
-		return;
-	}
+	bxt_sanitize_cdclk(dev_priv);

-	DRM_DEBUG_KMS("CDCLK not enabled, enabling it\n");
+	if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0)
+		return;

 	/*
 	 * FIXME:
 	 * - The initial CDCLK needs to be read from VBT.
 	 *   Need to make this change after VBT has changes for BXT.
-	 * - check if setting the max (or any) cdclk freq is really necessary
-	 *   here, it belongs to modeset time
 	 */
-	broxton_set_cdclk(dev_priv, 624000);
-
-	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
-	POSTING_READ(DBUF_CTL);
+	bxt_set_cdclk(dev_priv, bxt_calc_cdclk(0));
+}

-	udelay(10);
+void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
+{
+	bxt_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref);
+}

-	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
-		DRM_ERROR("DBuf power enable timeout!\n");
+static int skl_calc_cdclk(int max_pixclk, int vco)
+{
+	if (vco == 8640000) {
+		if (max_pixclk > 540000)
+			return 617143;
+		else if (max_pixclk > 432000)
+			return 540000;
+		else if (max_pixclk > 308571)
+			return 432000;
+		else
+			return 308571;
+	} else {
+		if (max_pixclk > 540000)
+			return 675000;
+		else if (max_pixclk > 450000)
+			return 540000;
+		else if (max_pixclk > 337500)
+			return 450000;
+		else
+			return 337500;
+	}
 }

-void broxton_uninit_cdclk(struct drm_i915_private *dev_priv)
+static void
+skl_dpll0_update(struct drm_i915_private *dev_priv)
 {
-	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
-	POSTING_READ(DBUF_CTL);
+	u32 val;

-	udelay(10);
+	dev_priv->cdclk_pll.ref = 24000;
+	dev_priv->cdclk_pll.vco = 0;

-	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
-		DRM_ERROR("DBuf power disable timeout!\n");
+	val = I915_READ(LCPLL1_CTL);
+	if ((val & LCPLL_PLL_ENABLE) == 0)
+		return;

-	/* Set minimum (bypass) frequency, in effect turning off the DE PLL */
-	broxton_set_cdclk(dev_priv, 19200);
-}
+	if (WARN_ON((val & LCPLL_PLL_LOCK) == 0))
+		return;

-static const struct skl_cdclk_entry {
-	unsigned int freq;
-	unsigned int vco;
-} skl_cdclk_frequencies[] = {
-	{ .freq = 308570, .vco = 8640 },
-	{ .freq = 337500, .vco = 8100 },
-	{ .freq = 432000, .vco = 8640 },
-	{ .freq = 450000, .vco = 8100 },
-	{ .freq = 540000, .vco = 8100 },
-	{ .freq = 617140, .vco = 8640 },
-	{ .freq = 675000, .vco = 8100 },
-};
+	val = I915_READ(DPLL_CTRL1);

-static unsigned int skl_cdclk_decimal(unsigned int freq)
-{
-	return (freq - 1000) / 500;
+	if (WARN_ON((val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
+			    DPLL_CTRL1_SSC(SKL_DPLL0) |
+			    DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) !=
+		    DPLL_CTRL1_OVERRIDE(SKL_DPLL0)))
+		return;
+
+	switch (val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) {
+	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0):
+	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, SKL_DPLL0):
+	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, SKL_DPLL0):
+	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, SKL_DPLL0):
+		dev_priv->cdclk_pll.vco = 8100000;
+		break;
+	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0):
+	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, SKL_DPLL0):
+		dev_priv->cdclk_pll.vco = 8640000;
+		break;
+	default:
+		MISSING_CASE(val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
+		break;
+	}
 }

-static unsigned int skl_cdclk_get_vco(unsigned int freq)
+void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco)
 {
-	unsigned int i;
+	bool changed = dev_priv->skl_preferred_vco_freq != vco;

-	for (i = 0; i < ARRAY_SIZE(skl_cdclk_frequencies); i++) {
-		const struct skl_cdclk_entry *e = &skl_cdclk_frequencies[i];
-
-		if (e->freq == freq)
-			return e->vco;
-	}
+	dev_priv->skl_preferred_vco_freq = vco;

-	return 8100;
+	if (changed)
+		intel_update_max_cdclk(dev_priv->dev);
 }

 static void
-skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
+skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
 {
-	unsigned int min_freq;
+	int min_cdclk = skl_calc_cdclk(0, vco);
 	u32 val;

-	/* select the minimum CDCLK before enabling DPLL 0 */
-	val = I915_READ(CDCLK_CTL);
-	val &= ~CDCLK_FREQ_SEL_MASK | ~CDCLK_FREQ_DECIMAL_MASK;
-	val |= CDCLK_FREQ_337_308;
-
-	if (required_vco == 8640)
-		min_freq = 308570;
-	else
-		min_freq = 337500;
-
-	val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_freq);
+	WARN_ON(vco != 8100000 && vco != 8640000);

+	/* select the minimum CDCLK before enabling DPLL 0 */
+	val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk);
 	I915_WRITE(CDCLK_CTL, val);
 	POSTING_READ(CDCLK_CTL);

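Two conversions worth verifying by hand: skl_cdclk_decimal() encodes kHz into the CDCLK_CTL decimal field (.1 MHz fixed point with a -1 MHz offset), and bxt_de_pll_vco()/bxt_set_cdclk() relate cdclk to the DE PLL as vco = ref * ratio and cdclk = vco / 2 / div. A standalone round-trip check (illustrative only):

#include <assert.h>

static int div_round_closest(int a, int b) { return (a + b / 2) / b; }

/* same formula as skl_cdclk_decimal() in the hunk above */
static int cdclk_decimal(int cdclk_khz)
{
	return div_round_closest(cdclk_khz - 1000, 500);
}

int main(void)
{
	/* 337.5 MHz -> (337500-1000)/500 = 673; decoding:
	 * 673 * 500 + 1000 = 337500 kHz again. */
	assert(cdclk_decimal(337500) == 673);
	assert(673 * 500 + 1000 == 337500);

	/* BXT: ref 19200 kHz * ratio 60 -> VCO 1152000 kHz;
	 * cdclk 576000 -> vco/cdclk = 2 -> the "div 1" CD2X selector
	 * per the cdclk = vco / 2 / div comment. */
	assert(19200 * 60 == 1152000);
	assert(div_round_closest(1152000, 576000) == 2);
	return 0;
}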
@@ -5592,14 +5658,14 @@ skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
 * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
 * The modeset code is responsible for the selection of the exact link
 * rate later on, with the constraint of choosing a frequency that
-	 * works with required_vco.
+	 * works with vco.
 	 */
 	val = I915_READ(DPLL_CTRL1);

 	val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
 		 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
 	val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
-	if (required_vco == 8640)
+	if (vco == 8640000)
 		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
 					    SKL_DPLL0);
 	else
@@ -5613,6 +5679,21 @@ skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)

 	if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5))
 		DRM_ERROR("DPLL0 not locked\n");
+
+	dev_priv->cdclk_pll.vco = vco;
+
+	/* We'll want to keep using the current vco from now on. */
+	skl_set_preferred_cdclk_vco(dev_priv, vco);
+}
+
+static void
+skl_dpll0_disable(struct drm_i915_private *dev_priv)
+{
+	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
+	if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
+		DRM_ERROR("Couldn't disable DPLL0\n");
+
+	dev_priv->cdclk_pll.vco = 0;
 }

 static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
@@ -5642,12 +5723,14 @@ static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
 	return false;
 }

-static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
+static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
 {
 	struct drm_device *dev = dev_priv->dev;
 	u32 freq_select, pcu_ack;

-	DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", freq);
+	WARN_ON((cdclk == 24000) != (vco == 0));
+
+	DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);

 	if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
 		DRM_ERROR("failed to inform PCU about cdclk change\n");
@@ -5655,7 +5738,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
 	}

 	/* set CDCLK_CTL */
-	switch(freq) {
+	switch (cdclk) {
 	case 450000:
 	case 432000:
 		freq_select = CDCLK_FREQ_450_432;
@@ -5665,20 +5748,27 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
 		freq_select = CDCLK_FREQ_540;
 		pcu_ack = 2;
 		break;
-	case 308570:
+	case 308571:
 	case 337500:
 	default:
 		freq_select = CDCLK_FREQ_337_308;
 		pcu_ack = 0;
 		break;
-	case 617140:
+	case 617143:
 	case 675000:
 		freq_select = CDCLK_FREQ_675_617;
 		pcu_ack = 3;
 		break;
 	}

-	I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(freq));
+	if (dev_priv->cdclk_pll.vco != 0 &&
+	    dev_priv->cdclk_pll.vco != vco)
+		skl_dpll0_disable(dev_priv);
+
+	if (dev_priv->cdclk_pll.vco != vco)
+		skl_dpll0_enable(dev_priv, vco);
+
+	I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk));
 	POSTING_READ(CDCLK_CTL);

 	/* inform PCU of the change */
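skl_set_cdclk() now treats cdclk and VCO as a single request: if the live VCO differs from the target, DPLL0 is cycled before CDCLK_CTL is rewritten, with the PCU informed before and after. The freq_select/pcu_ack pairing restated as a table (the pcu_ack value for the 450/432 group falls outside the quoted hunks; 1 per the kernel source):

#include <stdio.h>

struct skl_cdclk_sel {
	int cdclk_khz;
	int pcu_ack;
	const char *freq_select;
};

static const struct skl_cdclk_sel skl_cdclk_table[] = {
	{ 308571, 0, "CDCLK_FREQ_337_308" },
	{ 337500, 0, "CDCLK_FREQ_337_308" },
	{ 432000, 1, "CDCLK_FREQ_450_432" },
	{ 450000, 1, "CDCLK_FREQ_450_432" },
	{ 540000, 2, "CDCLK_FREQ_540" },
	{ 617143, 3, "CDCLK_FREQ_675_617" },
	{ 675000, 3, "CDCLK_FREQ_675_617" },
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(skl_cdclk_table) / sizeof(skl_cdclk_table[0]); i++)
		printf("%6d kHz -> %s, pcu_ack=%d\n",
		       skl_cdclk_table[i].cdclk_khz,
		       skl_cdclk_table[i].freq_select,
		       skl_cdclk_table[i].pcu_ack);
	return 0;
}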
@@ -5689,52 +5779,41 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
 	intel_update_cdclk(dev);
 }

+static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
+
 void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
 {
-	/* disable DBUF power */
-	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
-	POSTING_READ(DBUF_CTL);
-
-	udelay(10);
-
-	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
-		DRM_ERROR("DBuf power disable timeout\n");
-
-	/* disable DPLL0 */
-	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
-	if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
-		DRM_ERROR("Couldn't disable DPLL0\n");
+	skl_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref, 0);
 }

 void skl_init_cdclk(struct drm_i915_private *dev_priv)
 {
-	unsigned int required_vco;
+	int cdclk, vco;

-	/* DPLL0 not enabled (happens on early BIOS versions) */
-	if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
-		/* enable DPLL0 */
-		required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
-		skl_dpll0_enable(dev_priv, required_vco);
-	}
-
-	/* set CDCLK to the frequency the BIOS chose */
-	skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk);
+	skl_sanitize_cdclk(dev_priv);

-	/* enable DBUF power */
-	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
-	POSTING_READ(DBUF_CTL);
+	if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0) {
+		/*
+		 * Use the current vco as our initial
+		 * guess as to what the preferred vco is.
+		 */
+		if (dev_priv->skl_preferred_vco_freq == 0)
+			skl_set_preferred_cdclk_vco(dev_priv,
+						    dev_priv->cdclk_pll.vco);
+		return;
+	}

-	udelay(10);
+	vco = dev_priv->skl_preferred_vco_freq;
+	if (vco == 0)
+		vco = 8100000;
+	cdclk = skl_calc_cdclk(0, vco);

-	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
-		DRM_ERROR("DBuf power enable timeout\n");
+	skl_set_cdclk(dev_priv, cdclk, vco);
 }

-int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
+static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
 {
-	uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
-	uint32_t cdctl = I915_READ(CDCLK_CTL);
-	int freq = dev_priv->skl_boot_cdclk;
+	uint32_t cdctl, expected;

 	/*
 	 * check if the pre-os intialized the display
@@ -5744,8 +5823,10 @@ int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
 	if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
 		goto sanitize;

+	intel_update_cdclk(dev_priv->dev);
 	/* Is PLL enabled and locked ? */
-	if (!((lcpll1 & LCPLL_PLL_ENABLE) && (lcpll1 & LCPLL_PLL_LOCK)))
+	if (dev_priv->cdclk_pll.vco == 0 ||
+	    dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
 		goto sanitize;

 	/* DPLL okay; verify the cdclock
@@ -5754,19 +5835,20 @@ int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
 * decimal part is programmed wrong from BIOS where pre-os does not
 * enable display. Verify the same as well.
 */
-	if (cdctl == ((cdctl & CDCLK_FREQ_SEL_MASK) | skl_cdclk_decimal(freq)))
+	cdctl = I915_READ(CDCLK_CTL);
+	expected = (cdctl & CDCLK_FREQ_SEL_MASK) |
+		   skl_cdclk_decimal(dev_priv->cdclk_freq);
+	if (cdctl == expected)
 		/* All well; nothing to sanitize */
-		return false;
+		return;
+
 sanitize:
-	/*
-	 * As of now initialize with max cdclk till
-	 * we get dynamic cdclk support
-	 * */
-	dev_priv->skl_boot_cdclk = dev_priv->max_cdclk_freq;
-	skl_init_cdclk(dev_priv);
+	DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");

-	/* we did have to sanitize */
-	return true;
+	/* force cdclk programming */
+	dev_priv->cdclk_freq = 0;
+	/* force full PLL disable + enable */
+	dev_priv->cdclk_pll.vco = -1;
 }

 /* Adjust CDclk dividers to allow high res or save power if possible */
@@ -5906,21 +5988,15 @@ static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
 		return 200000;
 }

-static int broxton_calc_cdclk(struct drm_i915_private *dev_priv,
-			      int max_pixclk)
+static int bxt_calc_cdclk(int max_pixclk)
 {
-	/*
-	 * FIXME:
-	 * - remove the guardband, it's not needed on BXT
-	 * - set 19.2MHz bypass frequency if there are no active pipes
-	 */
-	if (max_pixclk > 576000*9/10)
+	if (max_pixclk > 576000)
 		return 624000;
-	else if (max_pixclk > 384000*9/10)
+	else if (max_pixclk > 384000)
 		return 576000;
-	else if (max_pixclk > 288000*9/10)
+	else if (max_pixclk > 288000)
 		return 384000;
-	else if (max_pixclk > 144000*9/10)
+	else if (max_pixclk > 144000)
 		return 288000;
 	else
 		return 144000;
@@ -5963,9 +6039,6 @@ static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
 	struct intel_atomic_state *intel_state =
 		to_intel_atomic_state(state);

-	if (max_pixclk < 0)
-		return max_pixclk;
-
 	intel_state->cdclk = intel_state->dev_cdclk =
 		valleyview_calc_cdclk(dev_priv, max_pixclk);

@@ -5975,22 +6048,17 @@ static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
 	return 0;
 }

-static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state)
+static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
 {
-	struct drm_device *dev = state->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int max_pixclk = intel_mode_max_pixclk(dev, state);
+	int max_pixclk = ilk_max_pixel_rate(state);
 	struct intel_atomic_state *intel_state =
 		to_intel_atomic_state(state);

-	if (max_pixclk < 0)
-		return max_pixclk;
-
 	intel_state->cdclk = intel_state->dev_cdclk =
-		broxton_calc_cdclk(dev_priv, max_pixclk);
+		bxt_calc_cdclk(max_pixclk);

 	if (!intel_state->active_crtcs)
-		intel_state->dev_cdclk = broxton_calc_cdclk(dev_priv, 0);
+		intel_state->dev_cdclk = bxt_calc_cdclk(0);

 	return 0;
 }
@@ -6252,7 +6320,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
6252 return; 6320 return;
6253 6321
6254 if (to_intel_plane_state(crtc->primary->state)->visible) { 6322 if (to_intel_plane_state(crtc->primary->state)->visible) {
6255 WARN_ON(intel_crtc->unpin_work); 6323 WARN_ON(intel_crtc->flip_work);
6256 6324
6257 intel_pre_disable_primary_noatomic(crtc); 6325 intel_pre_disable_primary_noatomic(crtc);
6258 6326
@@ -6262,8 +6330,8 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
6262 6330
6263 dev_priv->display.crtc_disable(crtc); 6331 dev_priv->display.crtc_disable(crtc);
6264 6332
6265 DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was enabled, now disabled\n", 6333 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
6266 crtc->base.id); 6334 crtc->base.id, crtc->name);
6267 6335
6268 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0); 6336 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
6269 crtc->state->active = false; 6337 crtc->state->active = false;
@@ -6563,10 +6631,10 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
6563 struct drm_device *dev = crtc->base.dev; 6631 struct drm_device *dev = crtc->base.dev;
6564 struct drm_i915_private *dev_priv = dev->dev_private; 6632 struct drm_i915_private *dev_priv = dev->dev_private;
6565 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 6633 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
6634 int clock_limit = dev_priv->max_dotclk_freq;
6566 6635
6567 /* FIXME should check pixel clock limits on all platforms */
6568 if (INTEL_INFO(dev)->gen < 4) { 6636 if (INTEL_INFO(dev)->gen < 4) {
6569 int clock_limit = dev_priv->max_cdclk_freq * 9 / 10; 6637 clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
6570 6638
6571 /* 6639 /*
6572 * Enable double wide mode when the dot clock 6640 * Enable double wide mode when the dot clock
@@ -6574,16 +6642,16 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
6574 */ 6642 */
6575 if (intel_crtc_supports_double_wide(crtc) && 6643 if (intel_crtc_supports_double_wide(crtc) &&
6576 adjusted_mode->crtc_clock > clock_limit) { 6644 adjusted_mode->crtc_clock > clock_limit) {
6577 clock_limit *= 2; 6645 clock_limit = dev_priv->max_dotclk_freq;
6578 pipe_config->double_wide = true; 6646 pipe_config->double_wide = true;
6579 } 6647 }
6648 }
6580 6649
6581 if (adjusted_mode->crtc_clock > clock_limit) { 6650 if (adjusted_mode->crtc_clock > clock_limit) {
6582 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n", 6651 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
6583 adjusted_mode->crtc_clock, clock_limit, 6652 adjusted_mode->crtc_clock, clock_limit,
6584 yesno(pipe_config->double_wide)); 6653 yesno(pipe_config->double_wide));
6585 return -EINVAL; 6654 return -EINVAL;
6586 }
6587 } 6655 }
6588 6656
6589 /* 6657 /*
@@ -6615,76 +6683,98 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
6615static int skylake_get_display_clock_speed(struct drm_device *dev) 6683static int skylake_get_display_clock_speed(struct drm_device *dev)
6616{ 6684{
6617 struct drm_i915_private *dev_priv = to_i915(dev); 6685 struct drm_i915_private *dev_priv = to_i915(dev);
6618 uint32_t lcpll1 = I915_READ(LCPLL1_CTL); 6686 uint32_t cdctl;
6619 uint32_t cdctl = I915_READ(CDCLK_CTL);
6620 uint32_t linkrate;
6621 6687
6622 if (!(lcpll1 & LCPLL_PLL_ENABLE)) 6688 skl_dpll0_update(dev_priv);
6623 return 24000; /* 24MHz is the cd freq with NSSC ref */
6624 6689
6625 if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540) 6690 if (dev_priv->cdclk_pll.vco == 0)
6626 return 540000; 6691 return dev_priv->cdclk_pll.ref;
6627 6692
6628 linkrate = (I915_READ(DPLL_CTRL1) & 6693 cdctl = I915_READ(CDCLK_CTL);
6629 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;
6630 6694
6631 if (linkrate == DPLL_CTRL1_LINK_RATE_2160 || 6695 if (dev_priv->cdclk_pll.vco == 8640000) {
6632 linkrate == DPLL_CTRL1_LINK_RATE_1080) {
6633 /* vco 8640 */
6634 switch (cdctl & CDCLK_FREQ_SEL_MASK) { 6696 switch (cdctl & CDCLK_FREQ_SEL_MASK) {
6635 case CDCLK_FREQ_450_432: 6697 case CDCLK_FREQ_450_432:
6636 return 432000; 6698 return 432000;
6637 case CDCLK_FREQ_337_308: 6699 case CDCLK_FREQ_337_308:
6638 return 308570; 6700 return 308571;
6701 case CDCLK_FREQ_540:
6702 return 540000;
6639 case CDCLK_FREQ_675_617: 6703 case CDCLK_FREQ_675_617:
6640 return 617140; 6704 return 617143;
6641 default: 6705 default:
6642 WARN(1, "Unknown cd freq selection\n"); 6706 MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
6643 } 6707 }
6644 } else { 6708 } else {
6645 /* vco 8100 */
6646 switch (cdctl & CDCLK_FREQ_SEL_MASK) { 6709 switch (cdctl & CDCLK_FREQ_SEL_MASK) {
6647 case CDCLK_FREQ_450_432: 6710 case CDCLK_FREQ_450_432:
6648 return 450000; 6711 return 450000;
6649 case CDCLK_FREQ_337_308: 6712 case CDCLK_FREQ_337_308:
6650 return 337500; 6713 return 337500;
6714 case CDCLK_FREQ_540:
6715 return 540000;
6651 case CDCLK_FREQ_675_617: 6716 case CDCLK_FREQ_675_617:
6652 return 675000; 6717 return 675000;
6653 default: 6718 default:
6654 WARN(1, "Unknown cd freq selection\n"); 6719 MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
6655 } 6720 }
6656 } 6721 }
6657 6722
6658 /* error case, do as if DPLL0 isn't enabled */ 6723 return dev_priv->cdclk_pll.ref;
6659 return 24000; 6724}
6725
6726static void bxt_de_pll_update(struct drm_i915_private *dev_priv)
6727{
6728 u32 val;
6729
6730 dev_priv->cdclk_pll.ref = 19200;
6731 dev_priv->cdclk_pll.vco = 0;
6732
6733 val = I915_READ(BXT_DE_PLL_ENABLE);
6734 if ((val & BXT_DE_PLL_PLL_ENABLE) == 0)
6735 return;
6736
6737 if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0))
6738 return;
6739
6740 val = I915_READ(BXT_DE_PLL_CTL);
6741 dev_priv->cdclk_pll.vco = (val & BXT_DE_PLL_RATIO_MASK) *
6742 dev_priv->cdclk_pll.ref;
6660} 6743}
6661 6744
6662static int broxton_get_display_clock_speed(struct drm_device *dev) 6745static int broxton_get_display_clock_speed(struct drm_device *dev)
6663{ 6746{
6664 struct drm_i915_private *dev_priv = to_i915(dev); 6747 struct drm_i915_private *dev_priv = to_i915(dev);
6665 uint32_t cdctl = I915_READ(CDCLK_CTL); 6748 u32 divider;
6666 uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK; 6749 int div, vco;
6667 uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE);
6668 int cdclk;
6669 6750
6670 if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE)) 6751 bxt_de_pll_update(dev_priv);
6671 return 19200; 6752
6753 vco = dev_priv->cdclk_pll.vco;
6754 if (vco == 0)
6755 return dev_priv->cdclk_pll.ref;
6672 6756
6673 cdclk = 19200 * pll_ratio / 2; 6757 divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
6674 6758
6675 switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) { 6759 switch (divider) {
6676 case BXT_CDCLK_CD2X_DIV_SEL_1: 6760 case BXT_CDCLK_CD2X_DIV_SEL_1:
6677 return cdclk; /* 576MHz or 624MHz */ 6761 div = 2;
6762 break;
6678 case BXT_CDCLK_CD2X_DIV_SEL_1_5: 6763 case BXT_CDCLK_CD2X_DIV_SEL_1_5:
6679 return cdclk * 2 / 3; /* 384MHz */ 6764 div = 3;
6765 break;
6680 case BXT_CDCLK_CD2X_DIV_SEL_2: 6766 case BXT_CDCLK_CD2X_DIV_SEL_2:
6681 return cdclk / 2; /* 288MHz */ 6767 div = 4;
6768 break;
6682 case BXT_CDCLK_CD2X_DIV_SEL_4: 6769 case BXT_CDCLK_CD2X_DIV_SEL_4:
6683 return cdclk / 4; /* 144MHz */ 6770 div = 8;
6771 break;
6772 default:
6773 MISSING_CASE(divider);
6774 return dev_priv->cdclk_pll.ref;
6684 } 6775 }
6685 6776
6686 /* error case, do as if DE PLL isn't enabled */ 6777 return DIV_ROUND_CLOSEST(vco, div);
6687 return 19200;
6688} 6778}
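
broxton_get_display_clock_speed() now derives cdclk from the cached DE PLL state: vco = 19200 kHz * ratio, and the CD2X field selects a divider of 1, 1.5, 2 or 4 on top of the implicit /2, hence div values of 2, 3, 4 and 8. Worked example with an assumed PLL ratio of 60 (illustrative value, not read from hardware):

#define DIV_ROUND_CLOSEST(x, divisor) (((x) + (divisor) / 2) / (divisor))

int main(void)
{
	int ref = 19200;	/* kHz, BXT reference clock */
	int vco = ref * 60;	/* 1152000 kHz for a ratio of 60 */

	int cd2x_1   = DIV_ROUND_CLOSEST(vco, 2);	/* 576000 kHz */
	int cd2x_1_5 = DIV_ROUND_CLOSEST(vco, 3);	/* 384000 kHz */
	int cd2x_2   = DIV_ROUND_CLOSEST(vco, 4);	/* 288000 kHz */
	int cd2x_4   = DIV_ROUND_CLOSEST(vco, 8);	/* 144000 kHz */

	return !(cd2x_1 == 576000 && cd2x_1_5 == 384000 &&
		 cd2x_2 == 288000 && cd2x_4 == 144000);
}
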
6689 6779
6690static int broadwell_get_display_clock_speed(struct drm_device *dev) 6780static int broadwell_get_display_clock_speed(struct drm_device *dev)
@@ -7063,7 +7153,7 @@ static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
7063 7153
7064static void i9xx_update_pll_dividers(struct intel_crtc *crtc, 7154static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
7065 struct intel_crtc_state *crtc_state, 7155 struct intel_crtc_state *crtc_state,
7066 intel_clock_t *reduced_clock) 7156 struct dpll *reduced_clock)
7067{ 7157{
7068 struct drm_device *dev = crtc->base.dev; 7158 struct drm_device *dev = crtc->base.dev;
7069 u32 fp, fp2 = 0; 7159 u32 fp, fp2 = 0;
@@ -7487,7 +7577,7 @@ void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
7487 7577
7488static void i9xx_compute_dpll(struct intel_crtc *crtc, 7578static void i9xx_compute_dpll(struct intel_crtc *crtc,
7489 struct intel_crtc_state *crtc_state, 7579 struct intel_crtc_state *crtc_state,
7490 intel_clock_t *reduced_clock) 7580 struct dpll *reduced_clock)
7491{ 7581{
7492 struct drm_device *dev = crtc->base.dev; 7582 struct drm_device *dev = crtc->base.dev;
7493 struct drm_i915_private *dev_priv = dev->dev_private; 7583 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7563,7 +7653,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
7563 7653
7564static void i8xx_compute_dpll(struct intel_crtc *crtc, 7654static void i8xx_compute_dpll(struct intel_crtc *crtc,
7565 struct intel_crtc_state *crtc_state, 7655 struct intel_crtc_state *crtc_state,
7566 intel_clock_t *reduced_clock) 7656 struct dpll *reduced_clock)
7567{ 7657{
7568 struct drm_device *dev = crtc->base.dev; 7658 struct drm_device *dev = crtc->base.dev;
7569 struct drm_i915_private *dev_priv = dev->dev_private; 7659 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7817,7 +7907,7 @@ static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
7817{ 7907{
7818 struct drm_device *dev = crtc->base.dev; 7908 struct drm_device *dev = crtc->base.dev;
7819 struct drm_i915_private *dev_priv = dev->dev_private; 7909 struct drm_i915_private *dev_priv = dev->dev_private;
7820 const intel_limit_t *limit; 7910 const struct intel_limit *limit;
7821 int refclk = 48000; 7911 int refclk = 48000;
7822 7912
7823 memset(&crtc_state->dpll_hw_state, 0, 7913 memset(&crtc_state->dpll_hw_state, 0,
@@ -7853,7 +7943,7 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
7853{ 7943{
7854 struct drm_device *dev = crtc->base.dev; 7944 struct drm_device *dev = crtc->base.dev;
7855 struct drm_i915_private *dev_priv = dev->dev_private; 7945 struct drm_i915_private *dev_priv = dev->dev_private;
7856 const intel_limit_t *limit; 7946 const struct intel_limit *limit;
7857 int refclk = 96000; 7947 int refclk = 96000;
7858 7948
7859 memset(&crtc_state->dpll_hw_state, 0, 7949 memset(&crtc_state->dpll_hw_state, 0,
@@ -7896,7 +7986,7 @@ static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
7896{ 7986{
7897 struct drm_device *dev = crtc->base.dev; 7987 struct drm_device *dev = crtc->base.dev;
7898 struct drm_i915_private *dev_priv = dev->dev_private; 7988 struct drm_i915_private *dev_priv = dev->dev_private;
7899 const intel_limit_t *limit; 7989 const struct intel_limit *limit;
7900 int refclk = 96000; 7990 int refclk = 96000;
7901 7991
7902 memset(&crtc_state->dpll_hw_state, 0, 7992 memset(&crtc_state->dpll_hw_state, 0,
@@ -7930,7 +8020,7 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
7930{ 8020{
7931 struct drm_device *dev = crtc->base.dev; 8021 struct drm_device *dev = crtc->base.dev;
7932 struct drm_i915_private *dev_priv = dev->dev_private; 8022 struct drm_i915_private *dev_priv = dev->dev_private;
7933 const intel_limit_t *limit; 8023 const struct intel_limit *limit;
7934 int refclk = 96000; 8024 int refclk = 96000;
7935 8025
7936 memset(&crtc_state->dpll_hw_state, 0, 8026 memset(&crtc_state->dpll_hw_state, 0,
@@ -7963,7 +8053,7 @@ static int chv_crtc_compute_clock(struct intel_crtc *crtc,
7963 struct intel_crtc_state *crtc_state) 8053 struct intel_crtc_state *crtc_state)
7964{ 8054{
7965 int refclk = 100000; 8055 int refclk = 100000;
7966 const intel_limit_t *limit = &intel_limits_chv; 8056 const struct intel_limit *limit = &intel_limits_chv;
7967 8057
7968 memset(&crtc_state->dpll_hw_state, 0, 8058 memset(&crtc_state->dpll_hw_state, 0,
7969 sizeof(crtc_state->dpll_hw_state)); 8059 sizeof(crtc_state->dpll_hw_state));
@@ -7984,7 +8074,7 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
7984 struct intel_crtc_state *crtc_state) 8074 struct intel_crtc_state *crtc_state)
7985{ 8075{
7986 int refclk = 100000; 8076 int refclk = 100000;
7987 const intel_limit_t *limit = &intel_limits_vlv; 8077 const struct intel_limit *limit = &intel_limits_vlv;
7988 8078
7989 memset(&crtc_state->dpll_hw_state, 0, 8079 memset(&crtc_state->dpll_hw_state, 0,
7990 sizeof(crtc_state->dpll_hw_state)); 8080 sizeof(crtc_state->dpll_hw_state));
@@ -8034,7 +8124,7 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
8034 struct drm_device *dev = crtc->base.dev; 8124 struct drm_device *dev = crtc->base.dev;
8035 struct drm_i915_private *dev_priv = dev->dev_private; 8125 struct drm_i915_private *dev_priv = dev->dev_private;
8036 int pipe = pipe_config->cpu_transcoder; 8126 int pipe = pipe_config->cpu_transcoder;
8037 intel_clock_t clock; 8127 struct dpll clock;
8038 u32 mdiv; 8128 u32 mdiv;
8039 int refclk = 100000; 8129 int refclk = 100000;
8040 8130
@@ -8131,7 +8221,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
8131 struct drm_i915_private *dev_priv = dev->dev_private; 8221 struct drm_i915_private *dev_priv = dev->dev_private;
8132 int pipe = pipe_config->cpu_transcoder; 8222 int pipe = pipe_config->cpu_transcoder;
8133 enum dpio_channel port = vlv_pipe_to_channel(pipe); 8223 enum dpio_channel port = vlv_pipe_to_channel(pipe);
8134 intel_clock_t clock; 8224 struct dpll clock;
8135 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3; 8225 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
8136 int refclk = 100000; 8226 int refclk = 100000;
8137 8227
@@ -8275,12 +8365,14 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
8275{ 8365{
8276 struct drm_i915_private *dev_priv = dev->dev_private; 8366 struct drm_i915_private *dev_priv = dev->dev_private;
8277 struct intel_encoder *encoder; 8367 struct intel_encoder *encoder;
8368 int i;
8278 u32 val, final; 8369 u32 val, final;
8279 bool has_lvds = false; 8370 bool has_lvds = false;
8280 bool has_cpu_edp = false; 8371 bool has_cpu_edp = false;
8281 bool has_panel = false; 8372 bool has_panel = false;
8282 bool has_ck505 = false; 8373 bool has_ck505 = false;
8283 bool can_ssc = false; 8374 bool can_ssc = false;
8375 bool using_ssc_source = false;
8284 8376
8285 /* We need to take the global config into account */ 8377 /* We need to take the global config into account */
8286 for_each_intel_encoder(dev, encoder) { 8378 for_each_intel_encoder(dev, encoder) {
@@ -8307,8 +8399,22 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
8307 can_ssc = true; 8399 can_ssc = true;
8308 } 8400 }
8309 8401
8310 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n", 8402 /* Check if any DPLLs are using the SSC source */
8311 has_panel, has_lvds, has_ck505); 8403 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
8404 u32 temp = I915_READ(PCH_DPLL(i));
8405
8406 if (!(temp & DPLL_VCO_ENABLE))
8407 continue;
8408
8409 if ((temp & PLL_REF_INPUT_MASK) ==
8410 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
8411 using_ssc_source = true;
8412 break;
8413 }
8414 }
8415
8416 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
8417 has_panel, has_lvds, has_ck505, using_ssc_source);
8312 8418
8313 /* Ironlake: try to setup display ref clock before DPLL 8419 /* Ironlake: try to setup display ref clock before DPLL
8314 * enabling. This is only under driver's control after 8420 * enabling. This is only under driver's control after
@@ -8345,9 +8451,9 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
8345 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 8451 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8346 } else 8452 } else
8347 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 8453 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8348 } else { 8454 } else if (using_ssc_source) {
8349 final |= DREF_SSC_SOURCE_DISABLE; 8455 final |= DREF_SSC_SOURCE_ENABLE;
8350 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 8456 final |= DREF_SSC1_ENABLE;
8351 } 8457 }
8352 8458
8353 if (final == val) 8459 if (final == val)
@@ -8393,7 +8499,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
8393 POSTING_READ(PCH_DREF_CONTROL); 8499 POSTING_READ(PCH_DREF_CONTROL);
8394 udelay(200); 8500 udelay(200);
8395 } else { 8501 } else {
8396 DRM_DEBUG_KMS("Disabling SSC entirely\n"); 8502 DRM_DEBUG_KMS("Disabling CPU source output\n");
8397 8503
8398 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 8504 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8399 8505
@@ -8404,16 +8510,20 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
8404 POSTING_READ(PCH_DREF_CONTROL); 8510 POSTING_READ(PCH_DREF_CONTROL);
8405 udelay(200); 8511 udelay(200);
8406 8512
8407 /* Turn off the SSC source */ 8513 if (!using_ssc_source) {
8408 val &= ~DREF_SSC_SOURCE_MASK; 8514 DRM_DEBUG_KMS("Disabling SSC source\n");
8409 val |= DREF_SSC_SOURCE_DISABLE;
8410 8515
8411 /* Turn off SSC1 */ 8516 /* Turn off the SSC source */
8412 val &= ~DREF_SSC1_ENABLE; 8517 val &= ~DREF_SSC_SOURCE_MASK;
8518 val |= DREF_SSC_SOURCE_DISABLE;
8413 8519
8414 I915_WRITE(PCH_DREF_CONTROL, val); 8520 /* Turn off SSC1 */
8415 POSTING_READ(PCH_DREF_CONTROL); 8521 val &= ~DREF_SSC1_ENABLE;
8416 udelay(200); 8522
8523 I915_WRITE(PCH_DREF_CONTROL, val);
8524 POSTING_READ(PCH_DREF_CONTROL);
8525 udelay(200);
8526 }
8417 } 8527 }
8418 8528
8419 BUG_ON(val != final); 8529 BUG_ON(val != final);
@@ -8794,7 +8904,7 @@ static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
8794 8904
8795static void ironlake_compute_dpll(struct intel_crtc *intel_crtc, 8905static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
8796 struct intel_crtc_state *crtc_state, 8906 struct intel_crtc_state *crtc_state,
8797 intel_clock_t *reduced_clock) 8907 struct dpll *reduced_clock)
8798{ 8908{
8799 struct drm_crtc *crtc = &intel_crtc->base; 8909 struct drm_crtc *crtc = &intel_crtc->base;
8800 struct drm_device *dev = crtc->dev; 8910 struct drm_device *dev = crtc->dev;
@@ -8902,10 +9012,10 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
8902{ 9012{
8903 struct drm_device *dev = crtc->base.dev; 9013 struct drm_device *dev = crtc->base.dev;
8904 struct drm_i915_private *dev_priv = dev->dev_private; 9014 struct drm_i915_private *dev_priv = dev->dev_private;
8905 intel_clock_t reduced_clock; 9015 struct dpll reduced_clock;
8906 bool has_reduced_clock = false; 9016 bool has_reduced_clock = false;
8907 struct intel_shared_dpll *pll; 9017 struct intel_shared_dpll *pll;
8908 const intel_limit_t *limit; 9018 const struct intel_limit *limit;
8909 int refclk = 120000; 9019 int refclk = 120000;
8910 9020
8911 memset(&crtc_state->dpll_hw_state, 0, 9021 memset(&crtc_state->dpll_hw_state, 0,
@@ -9300,6 +9410,10 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
9300 ironlake_get_fdi_m_n_config(crtc, pipe_config); 9410 ironlake_get_fdi_m_n_config(crtc, pipe_config);
9301 9411
9302 if (HAS_PCH_IBX(dev_priv)) { 9412 if (HAS_PCH_IBX(dev_priv)) {
9413 /*
9414 * The pipe->pch transcoder and pch transcoder->pll
9415 * mapping is fixed.
9416 */
9303 pll_id = (enum intel_dpll_id) crtc->pipe; 9417 pll_id = (enum intel_dpll_id) crtc->pipe;
9304 } else { 9418 } else {
9305 tmp = I915_READ(PCH_DPLL_SEL); 9419 tmp = I915_READ(PCH_DPLL_SEL);
@@ -9560,14 +9674,14 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
9560 } 9674 }
9561} 9675}
9562 9676
9563static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state) 9677static void bxt_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9564{ 9678{
9565 struct drm_device *dev = old_state->dev; 9679 struct drm_device *dev = old_state->dev;
9566 struct intel_atomic_state *old_intel_state = 9680 struct intel_atomic_state *old_intel_state =
9567 to_intel_atomic_state(old_state); 9681 to_intel_atomic_state(old_state);
9568 unsigned int req_cdclk = old_intel_state->dev_cdclk; 9682 unsigned int req_cdclk = old_intel_state->dev_cdclk;
9569 9683
9570 broxton_set_cdclk(to_i915(dev), req_cdclk); 9684 bxt_set_cdclk(to_i915(dev), req_cdclk);
9571} 9685}
9572 9686
9573/* compute the max rate for new configuration */ 9687/* compute the max rate for new configuration */
@@ -9687,6 +9801,18 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
9687 cdclk, dev_priv->cdclk_freq); 9801 cdclk, dev_priv->cdclk_freq);
9688} 9802}
9689 9803
9804static int broadwell_calc_cdclk(int max_pixclk)
9805{
9806 if (max_pixclk > 540000)
9807 return 675000;
9808 else if (max_pixclk > 450000)
9809 return 540000;
9810 else if (max_pixclk > 337500)
9811 return 450000;
9812 else
9813 return 337500;
9814}
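
Factoring the ladder into broadwell_calc_cdclk() lets the no-active-CRTC case below reuse it instead of a hard-coded 337500. Usage sketch (values follow directly from the ladder above):

int idle_cdclk = broadwell_calc_cdclk(0);	/* 337500 kHz, minimum step */
int uhd_cdclk  = broadwell_calc_cdclk(594000);	/* 675000 kHz */
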
9815
9690static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state) 9816static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
9691{ 9817{
9692 struct drm_i915_private *dev_priv = to_i915(state->dev); 9818 struct drm_i915_private *dev_priv = to_i915(state->dev);
@@ -9698,14 +9824,7 @@ static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
9698 * FIXME should also account for plane ratio 9824 * FIXME should also account for plane ratio
9699 * once 64bpp pixel formats are supported. 9825 * once 64bpp pixel formats are supported.
9700 */ 9826 */
9701 if (max_pixclk > 540000) 9827 cdclk = broadwell_calc_cdclk(max_pixclk);
9702 cdclk = 675000;
9703 else if (max_pixclk > 450000)
9704 cdclk = 540000;
9705 else if (max_pixclk > 337500)
9706 cdclk = 450000;
9707 else
9708 cdclk = 337500;
9709 9828
9710 if (cdclk > dev_priv->max_cdclk_freq) { 9829 if (cdclk > dev_priv->max_cdclk_freq) {
9711 DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n", 9830 DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
@@ -9715,7 +9834,7 @@ static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
9715 9834
9716 intel_state->cdclk = intel_state->dev_cdclk = cdclk; 9835 intel_state->cdclk = intel_state->dev_cdclk = cdclk;
9717 if (!intel_state->active_crtcs) 9836 if (!intel_state->active_crtcs)
9718 intel_state->dev_cdclk = 337500; 9837 intel_state->dev_cdclk = broadwell_calc_cdclk(0);
9719 9838
9720 return 0; 9839 return 0;
9721} 9840}
@@ -9730,6 +9849,47 @@ static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9730 broadwell_set_cdclk(dev, req_cdclk); 9849 broadwell_set_cdclk(dev, req_cdclk);
9731} 9850}
9732 9851
9852static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
9853{
9854 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
9855 struct drm_i915_private *dev_priv = to_i915(state->dev);
9856 const int max_pixclk = ilk_max_pixel_rate(state);
9857 int vco = intel_state->cdclk_pll_vco;
9858 int cdclk;
9859
9860 /*
9861 * FIXME should also account for plane ratio
9862 * once 64bpp pixel formats are supported.
9863 */
9864 cdclk = skl_calc_cdclk(max_pixclk, vco);
9865
9866 /*
 9867 * FIXME move the cdclk calculation to
 9868 * compute_config() so we can fail gracefully.
9869 */
9870 if (cdclk > dev_priv->max_cdclk_freq) {
9871 DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
9872 cdclk, dev_priv->max_cdclk_freq);
9873 cdclk = dev_priv->max_cdclk_freq;
9874 }
9875
9876 intel_state->cdclk = intel_state->dev_cdclk = cdclk;
9877 if (!intel_state->active_crtcs)
9878 intel_state->dev_cdclk = skl_calc_cdclk(0, vco);
9879
9880 return 0;
9881}
9882
9883static void skl_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9884{
9885 struct drm_i915_private *dev_priv = to_i915(old_state->dev);
9886 struct intel_atomic_state *intel_state = to_intel_atomic_state(old_state);
9887 unsigned int req_cdclk = intel_state->dev_cdclk;
9888 unsigned int req_vco = intel_state->cdclk_pll_vco;
9889
9890 skl_set_cdclk(dev_priv, req_cdclk, req_vco);
9891}
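
skl_modeset_calc_cdclk() and skl_modeset_commit_cdclk() follow the usual atomic split: compute the target during check, apply it during commit. A sketch of the ordering only (the atomic core actually invokes these through the dev_priv->display hooks, which this fragment does not show):

static void skl_cdclk_modeset_order(struct drm_atomic_state *state)
{
	/* check phase: store cdclk/dev_cdclk/cdclk_pll_vco in the state */
	skl_modeset_calc_cdclk(state);
	/* ... atomic core validates and swaps state ... */
	/* commit phase: program DPLL0 and CDCLK_CTL from the swapped state */
	skl_modeset_commit_cdclk(state);
}
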
9892
9733static int haswell_crtc_compute_clock(struct intel_crtc *crtc, 9893static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
9734 struct intel_crtc_state *crtc_state) 9894 struct intel_crtc_state *crtc_state)
9735{ 9895{
@@ -9850,6 +10010,10 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
9850 enum intel_display_power_domain power_domain; 10010 enum intel_display_power_domain power_domain;
9851 u32 tmp; 10011 u32 tmp;
9852 10012
10013 /*
10014 * The pipe->transcoder mapping is fixed with the exception of the eDP
10015 * transcoder handled below.
10016 */
9853 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 10017 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9854 10018
9855 /* 10019 /*
@@ -10317,10 +10481,10 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
10317 struct drm_i915_gem_object *obj; 10481 struct drm_i915_gem_object *obj;
10318 struct drm_mode_fb_cmd2 mode_cmd = { 0 }; 10482 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
10319 10483
10320 obj = i915_gem_alloc_object(dev, 10484 obj = i915_gem_object_create(dev,
10321 intel_framebuffer_size_for_mode(mode, bpp)); 10485 intel_framebuffer_size_for_mode(mode, bpp));
10322 if (obj == NULL) 10486 if (IS_ERR(obj))
10323 return ERR_PTR(-ENOMEM); 10487 return ERR_CAST(obj);
10324 10488
10325 mode_cmd.width = mode->hdisplay; 10489 mode_cmd.width = mode->hdisplay;
10326 mode_cmd.height = mode->vdisplay; 10490 mode_cmd.height = mode->vdisplay;
@@ -10632,7 +10796,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
10632 int pipe = pipe_config->cpu_transcoder; 10796 int pipe = pipe_config->cpu_transcoder;
10633 u32 dpll = pipe_config->dpll_hw_state.dpll; 10797 u32 dpll = pipe_config->dpll_hw_state.dpll;
10634 u32 fp; 10798 u32 fp;
10635 intel_clock_t clock; 10799 struct dpll clock;
10636 int port_clock; 10800 int port_clock;
10637 int refclk = i9xx_pll_refclk(dev, pipe_config); 10801 int refclk = i9xx_pll_refclk(dev, pipe_config);
10638 10802
@@ -10806,31 +10970,27 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
10806 return mode; 10970 return mode;
10807} 10971}
10808 10972
10809void intel_mark_busy(struct drm_device *dev) 10973void intel_mark_busy(struct drm_i915_private *dev_priv)
10810{ 10974{
10811 struct drm_i915_private *dev_priv = dev->dev_private;
10812
10813 if (dev_priv->mm.busy) 10975 if (dev_priv->mm.busy)
10814 return; 10976 return;
10815 10977
10816 intel_runtime_pm_get(dev_priv); 10978 intel_runtime_pm_get(dev_priv);
10817 i915_update_gfx_val(dev_priv); 10979 i915_update_gfx_val(dev_priv);
10818 if (INTEL_INFO(dev)->gen >= 6) 10980 if (INTEL_GEN(dev_priv) >= 6)
10819 gen6_rps_busy(dev_priv); 10981 gen6_rps_busy(dev_priv);
10820 dev_priv->mm.busy = true; 10982 dev_priv->mm.busy = true;
10821} 10983}
10822 10984
10823void intel_mark_idle(struct drm_device *dev) 10985void intel_mark_idle(struct drm_i915_private *dev_priv)
10824{ 10986{
10825 struct drm_i915_private *dev_priv = dev->dev_private;
10826
10827 if (!dev_priv->mm.busy) 10987 if (!dev_priv->mm.busy)
10828 return; 10988 return;
10829 10989
10830 dev_priv->mm.busy = false; 10990 dev_priv->mm.busy = false;
10831 10991
10832 if (INTEL_INFO(dev)->gen >= 6) 10992 if (INTEL_GEN(dev_priv) >= 6)
10833 gen6_rps_idle(dev->dev_private); 10993 gen6_rps_idle(dev_priv);
10834 10994
10835 intel_runtime_pm_put(dev_priv); 10995 intel_runtime_pm_put(dev_priv);
10836} 10996}
@@ -10839,15 +10999,16 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
10839{ 10999{
10840 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11000 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10841 struct drm_device *dev = crtc->dev; 11001 struct drm_device *dev = crtc->dev;
10842 struct intel_unpin_work *work; 11002 struct intel_flip_work *work;
10843 11003
10844 spin_lock_irq(&dev->event_lock); 11004 spin_lock_irq(&dev->event_lock);
10845 work = intel_crtc->unpin_work; 11005 work = intel_crtc->flip_work;
10846 intel_crtc->unpin_work = NULL; 11006 intel_crtc->flip_work = NULL;
10847 spin_unlock_irq(&dev->event_lock); 11007 spin_unlock_irq(&dev->event_lock);
10848 11008
10849 if (work) { 11009 if (work) {
10850 cancel_work_sync(&work->work); 11010 cancel_work_sync(&work->mmio_work);
11011 cancel_work_sync(&work->unpin_work);
10851 kfree(work); 11012 kfree(work);
10852 } 11013 }
10853 11014
@@ -10858,12 +11019,15 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
10858 11019
10859static void intel_unpin_work_fn(struct work_struct *__work) 11020static void intel_unpin_work_fn(struct work_struct *__work)
10860{ 11021{
10861 struct intel_unpin_work *work = 11022 struct intel_flip_work *work =
10862 container_of(__work, struct intel_unpin_work, work); 11023 container_of(__work, struct intel_flip_work, unpin_work);
10863 struct intel_crtc *crtc = to_intel_crtc(work->crtc); 11024 struct intel_crtc *crtc = to_intel_crtc(work->crtc);
10864 struct drm_device *dev = crtc->base.dev; 11025 struct drm_device *dev = crtc->base.dev;
10865 struct drm_plane *primary = crtc->base.primary; 11026 struct drm_plane *primary = crtc->base.primary;
10866 11027
11028 if (is_mmio_work(work))
11029 flush_work(&work->mmio_work);
11030
10867 mutex_lock(&dev->struct_mutex); 11031 mutex_lock(&dev->struct_mutex);
10868 intel_unpin_fb_obj(work->old_fb, primary->state->rotation); 11032 intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
10869 drm_gem_object_unreference(&work->pending_flip_obj->base); 11033 drm_gem_object_unreference(&work->pending_flip_obj->base);
@@ -10882,60 +11046,14 @@ static void intel_unpin_work_fn(struct work_struct *__work)
10882 kfree(work); 11046 kfree(work);
10883} 11047}
10884 11048
10885static void do_intel_finish_page_flip(struct drm_device *dev,
10886 struct drm_crtc *crtc)
10887{
10888 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10889 struct intel_unpin_work *work;
10890 unsigned long flags;
10891
10892 /* Ignore early vblank irqs */
10893 if (intel_crtc == NULL)
10894 return;
10895
10896 /*
10897 * This is called both by irq handlers and the reset code (to complete
10898 * lost pageflips) so needs the full irqsave spinlocks.
10899 */
10900 spin_lock_irqsave(&dev->event_lock, flags);
10901 work = intel_crtc->unpin_work;
10902
10903 /* Ensure we don't miss a work->pending update ... */
10904 smp_rmb();
10905
10906 if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
10907 spin_unlock_irqrestore(&dev->event_lock, flags);
10908 return;
10909 }
10910
10911 page_flip_completed(intel_crtc);
10912
10913 spin_unlock_irqrestore(&dev->event_lock, flags);
10914}
10915
10916void intel_finish_page_flip(struct drm_device *dev, int pipe)
10917{
10918 struct drm_i915_private *dev_priv = dev->dev_private;
10919 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
10920
10921 do_intel_finish_page_flip(dev, crtc);
10922}
10923
10924void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
10925{
10926 struct drm_i915_private *dev_priv = dev->dev_private;
10927 struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
10928
10929 do_intel_finish_page_flip(dev, crtc);
10930}
10931
10932/* Is 'a' after or equal to 'b'? */ 11049/* Is 'a' after or equal to 'b'? */
10933static bool g4x_flip_count_after_eq(u32 a, u32 b) 11050static bool g4x_flip_count_after_eq(u32 a, u32 b)
10934{ 11051{
10935 return !((a - b) & 0x80000000); 11052 return !((a - b) & 0x80000000);
10936} 11053}
10937 11054
10938static bool page_flip_finished(struct intel_crtc *crtc) 11055static bool __pageflip_finished_cs(struct intel_crtc *crtc,
11056 struct intel_flip_work *work)
10939{ 11057{
10940 struct drm_device *dev = crtc->base.dev; 11058 struct drm_device *dev = crtc->base.dev;
10941 struct drm_i915_private *dev_priv = dev->dev_private; 11059 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -10977,40 +11095,103 @@ static bool page_flip_finished(struct intel_crtc *crtc)
10977 * anyway, we don't really care. 11095 * anyway, we don't really care.
10978 */ 11096 */
10979 return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) == 11097 return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
10980 crtc->unpin_work->gtt_offset && 11098 crtc->flip_work->gtt_offset &&
10981 g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)), 11099 g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
10982 crtc->unpin_work->flip_count); 11100 crtc->flip_work->flip_count);
10983} 11101}
10984 11102
10985void intel_prepare_page_flip(struct drm_device *dev, int plane) 11103static bool
11104__pageflip_finished_mmio(struct intel_crtc *crtc,
11105 struct intel_flip_work *work)
10986{ 11106{
10987 struct drm_i915_private *dev_priv = dev->dev_private; 11107 /*
10988 struct intel_crtc *intel_crtc = 11108 * MMIO work completes when vblank is different from
10989 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]); 11109 * flip_queued_vblank.
11110 *
11111 * Reset counter value doesn't matter, this is handled by
11112 * i915_wait_request finishing early, so no need to handle
11113 * reset here.
11114 */
11115 return intel_crtc_get_vblank_counter(crtc) != work->flip_queued_vblank;
11116}
11117
11118
11119static bool pageflip_finished(struct intel_crtc *crtc,
11120 struct intel_flip_work *work)
11121{
11122 if (!atomic_read(&work->pending))
11123 return false;
11124
11125 smp_rmb();
11126
11127 if (is_mmio_work(work))
11128 return __pageflip_finished_mmio(crtc, work);
11129 else
11130 return __pageflip_finished_cs(crtc, work);
11131}
11132
11133void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
11134{
11135 struct drm_device *dev = dev_priv->dev;
11136 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
11137 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11138 struct intel_flip_work *work;
10990 unsigned long flags; 11139 unsigned long flags;
10991 11140
11141 /* Ignore early vblank irqs */
11142 if (!crtc)
11143 return;
10992 11144
10993 /* 11145 /*
10994 * This is called both by irq handlers and the reset code (to complete 11146 * This is called both by irq handlers and the reset code (to complete
10995 * lost pageflips) so needs the full irqsave spinlocks. 11147 * lost pageflips) so needs the full irqsave spinlocks.
10996 *
10997 * NB: An MMIO update of the plane base pointer will also
10998 * generate a page-flip completion irq, i.e. every modeset
10999 * is also accompanied by a spurious intel_prepare_page_flip().
11000 */ 11148 */
11001 spin_lock_irqsave(&dev->event_lock, flags); 11149 spin_lock_irqsave(&dev->event_lock, flags);
11002 if (intel_crtc->unpin_work && page_flip_finished(intel_crtc)) 11150 work = intel_crtc->flip_work;
11003 atomic_inc_not_zero(&intel_crtc->unpin_work->pending); 11151
11152 if (work != NULL &&
11153 !is_mmio_work(work) &&
11154 pageflip_finished(intel_crtc, work))
11155 page_flip_completed(intel_crtc);
11156
11004 spin_unlock_irqrestore(&dev->event_lock, flags); 11157 spin_unlock_irqrestore(&dev->event_lock, flags);
11005} 11158}
11006 11159
11007static inline void intel_mark_page_flip_active(struct intel_unpin_work *work) 11160void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
11008{ 11161{
11162 struct drm_device *dev = dev_priv->dev;
11163 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
11164 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11165 struct intel_flip_work *work;
11166 unsigned long flags;
11167
11168 /* Ignore early vblank irqs */
11169 if (!crtc)
11170 return;
11171
11172 /*
11173 * This is called both by irq handlers and the reset code (to complete
11174 * lost pageflips) so needs the full irqsave spinlocks.
11175 */
11176 spin_lock_irqsave(&dev->event_lock, flags);
11177 work = intel_crtc->flip_work;
11178
11179 if (work != NULL &&
11180 is_mmio_work(work) &&
11181 pageflip_finished(intel_crtc, work))
11182 page_flip_completed(intel_crtc);
11183
11184 spin_unlock_irqrestore(&dev->event_lock, flags);
11185}
11186
11187static inline void intel_mark_page_flip_active(struct intel_crtc *crtc,
11188 struct intel_flip_work *work)
11189{
11190 work->flip_queued_vblank = intel_crtc_get_vblank_counter(crtc);
11191
11009 /* Ensure that the work item is consistent when activating it ... */ 11192 /* Ensure that the work item is consistent when activating it ... */
11010 smp_wmb(); 11193 smp_mb__before_atomic();
11011 atomic_set(&work->pending, INTEL_FLIP_PENDING); 11194 atomic_set(&work->pending, 1);
11012 /* and that it is marked active as soon as the irq could fire. */
11013 smp_wmb();
11014} 11195}
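
intel_mark_page_flip_active() publishes flip_queued_vblank before setting pending, and pageflip_finished() reads pending before trusting the counter; the smp_mb__before_atomic() here pairs with the reader's smp_rmb(). A reduced C11 sketch of the same pairing (release/acquire as stand-ins for the kernel barriers, stand-in types):

#include <stdatomic.h>

struct flip {
	unsigned int flip_queued_vblank;
	atomic_int pending;
};

static void mark_active(struct flip *w, unsigned int vblank)
{
	w->flip_queued_vblank = vblank;
	/* release: the vblank store is visible before pending reads as 1 */
	atomic_store_explicit(&w->pending, 1, memory_order_release);
}

static int finished(struct flip *w, unsigned int now)
{
	/* acquire: pending == 1 guarantees flip_queued_vblank is valid */
	if (!atomic_load_explicit(&w->pending, memory_order_acquire))
		return 0;
	return now != w->flip_queued_vblank;
}
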
11015 11196
11016static int intel_gen2_queue_flip(struct drm_device *dev, 11197static int intel_gen2_queue_flip(struct drm_device *dev,
@@ -11041,10 +11222,9 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
11041 intel_ring_emit(engine, MI_DISPLAY_FLIP | 11222 intel_ring_emit(engine, MI_DISPLAY_FLIP |
11042 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 11223 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11043 intel_ring_emit(engine, fb->pitches[0]); 11224 intel_ring_emit(engine, fb->pitches[0]);
11044 intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset); 11225 intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
11045 intel_ring_emit(engine, 0); /* aux display base address, unused */ 11226 intel_ring_emit(engine, 0); /* aux display base address, unused */
11046 11227
11047 intel_mark_page_flip_active(intel_crtc->unpin_work);
11048 return 0; 11228 return 0;
11049} 11229}
11050 11230
@@ -11073,10 +11253,9 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
11073 intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | 11253 intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 |
11074 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 11254 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11075 intel_ring_emit(engine, fb->pitches[0]); 11255 intel_ring_emit(engine, fb->pitches[0]);
11076 intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset); 11256 intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
11077 intel_ring_emit(engine, MI_NOOP); 11257 intel_ring_emit(engine, MI_NOOP);
11078 11258
11079 intel_mark_page_flip_active(intel_crtc->unpin_work);
11080 return 0; 11259 return 0;
11081} 11260}
11082 11261
@@ -11104,7 +11283,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
11104 intel_ring_emit(engine, MI_DISPLAY_FLIP | 11283 intel_ring_emit(engine, MI_DISPLAY_FLIP |
11105 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 11284 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11106 intel_ring_emit(engine, fb->pitches[0]); 11285 intel_ring_emit(engine, fb->pitches[0]);
11107 intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset | 11286 intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset |
11108 obj->tiling_mode); 11287 obj->tiling_mode);
11109 11288
11110 /* XXX Enabling the panel-fitter across page-flip is so far 11289 /* XXX Enabling the panel-fitter across page-flip is so far
@@ -11115,7 +11294,6 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
11115 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 11294 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
11116 intel_ring_emit(engine, pf | pipesrc); 11295 intel_ring_emit(engine, pf | pipesrc);
11117 11296
11118 intel_mark_page_flip_active(intel_crtc->unpin_work);
11119 return 0; 11297 return 0;
11120} 11298}
11121 11299
@@ -11139,7 +11317,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
11139 intel_ring_emit(engine, MI_DISPLAY_FLIP | 11317 intel_ring_emit(engine, MI_DISPLAY_FLIP |
11140 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 11318 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11141 intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode); 11319 intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode);
11142 intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset); 11320 intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
11143 11321
11144 /* Contrary to the suggestions in the documentation, 11322 /* Contrary to the suggestions in the documentation,
11145 * "Enable Panel Fitter" does not seem to be required when page 11323 * "Enable Panel Fitter" does not seem to be required when page
@@ -11151,7 +11329,6 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
11151 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 11329 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
11152 intel_ring_emit(engine, pf | pipesrc); 11330 intel_ring_emit(engine, pf | pipesrc);
11153 11331
11154 intel_mark_page_flip_active(intel_crtc->unpin_work);
11155 return 0; 11332 return 0;
11156} 11333}
11157 11334
@@ -11243,16 +11420,17 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
11243 11420
11244 intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit); 11421 intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit);
11245 intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode)); 11422 intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode));
11246 intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset); 11423 intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
11247 intel_ring_emit(engine, (MI_NOOP)); 11424 intel_ring_emit(engine, (MI_NOOP));
11248 11425
11249 intel_mark_page_flip_active(intel_crtc->unpin_work);
11250 return 0; 11426 return 0;
11251} 11427}
11252 11428
11253static bool use_mmio_flip(struct intel_engine_cs *engine, 11429static bool use_mmio_flip(struct intel_engine_cs *engine,
11254 struct drm_i915_gem_object *obj) 11430 struct drm_i915_gem_object *obj)
11255{ 11431{
11432 struct reservation_object *resv;
11433
11256 /* 11434 /*
11257 * This is not being used for older platforms, because 11435 * This is not being used for older platforms, because
11258 * non-availability of flip done interrupt forces us to use 11436 * non-availability of flip done interrupt forces us to use
@@ -11264,7 +11442,7 @@ static bool use_mmio_flip(struct intel_engine_cs *engine,
11264 if (engine == NULL) 11442 if (engine == NULL)
11265 return true; 11443 return true;
11266 11444
11267 if (INTEL_INFO(engine->dev)->gen < 5) 11445 if (INTEL_GEN(engine->i915) < 5)
11268 return false; 11446 return false;
11269 11447
11270 if (i915.use_mmio_flip < 0) 11448 if (i915.use_mmio_flip < 0)
@@ -11273,17 +11451,17 @@ static bool use_mmio_flip(struct intel_engine_cs *engine,
11273 return true; 11451 return true;
11274 else if (i915.enable_execlists) 11452 else if (i915.enable_execlists)
11275 return true; 11453 return true;
11276 else if (obj->base.dma_buf && 11454
11277 !reservation_object_test_signaled_rcu(obj->base.dma_buf->resv, 11455 resv = i915_gem_object_get_dmabuf_resv(obj);
11278 false)) 11456 if (resv && !reservation_object_test_signaled_rcu(resv, false))
11279 return true; 11457 return true;
11280 else 11458
11281 return engine != i915_gem_request_get_engine(obj->last_write_req); 11459 return engine != i915_gem_request_get_engine(obj->last_write_req);
11282} 11460}
11283 11461
11284static void skl_do_mmio_flip(struct intel_crtc *intel_crtc, 11462static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
11285 unsigned int rotation, 11463 unsigned int rotation,
11286 struct intel_unpin_work *work) 11464 struct intel_flip_work *work)
11287{ 11465{
11288 struct drm_device *dev = intel_crtc->base.dev; 11466 struct drm_device *dev = intel_crtc->base.dev;
11289 struct drm_i915_private *dev_priv = dev->dev_private; 11467 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -11335,7 +11513,7 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
11335} 11513}
11336 11514
11337static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc, 11515static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
11338 struct intel_unpin_work *work) 11516 struct intel_flip_work *work)
11339{ 11517{
11340 struct drm_device *dev = intel_crtc->base.dev; 11518 struct drm_device *dev = intel_crtc->base.dev;
11341 struct drm_i915_private *dev_priv = dev->dev_private; 11519 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -11358,78 +11536,37 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
11358 POSTING_READ(DSPSURF(intel_crtc->plane)); 11536 POSTING_READ(DSPSURF(intel_crtc->plane));
11359} 11537}
11360 11538
11361/* 11539static void intel_mmio_flip_work_func(struct work_struct *w)
11362 * XXX: This is the temporary way to update the plane registers until we get
11363 * around to using the usual plane update functions for MMIO flips
11364 */
11365static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip)
11366{
11367 struct intel_crtc *crtc = mmio_flip->crtc;
11368 struct intel_unpin_work *work;
11369
11370 spin_lock_irq(&crtc->base.dev->event_lock);
11371 work = crtc->unpin_work;
11372 spin_unlock_irq(&crtc->base.dev->event_lock);
11373 if (work == NULL)
11374 return;
11375
11376 intel_mark_page_flip_active(work);
11377
11378 intel_pipe_update_start(crtc);
11379
11380 if (INTEL_INFO(mmio_flip->i915)->gen >= 9)
11381 skl_do_mmio_flip(crtc, mmio_flip->rotation, work);
11382 else
11383 /* use_mmio_flip() restricts MMIO flips to ilk+ */
11384 ilk_do_mmio_flip(crtc, work);
11385
11386 intel_pipe_update_end(crtc);
11387}
11388
11389static void intel_mmio_flip_work_func(struct work_struct *work)
11390{ 11540{
11391 struct intel_mmio_flip *mmio_flip = 11541 struct intel_flip_work *work =
11392 container_of(work, struct intel_mmio_flip, work); 11542 container_of(w, struct intel_flip_work, mmio_work);
11543 struct intel_crtc *crtc = to_intel_crtc(work->crtc);
11544 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11393 struct intel_framebuffer *intel_fb = 11545 struct intel_framebuffer *intel_fb =
11394 to_intel_framebuffer(mmio_flip->crtc->base.primary->fb); 11546 to_intel_framebuffer(crtc->base.primary->fb);
11395 struct drm_i915_gem_object *obj = intel_fb->obj; 11547 struct drm_i915_gem_object *obj = intel_fb->obj;
11548 struct reservation_object *resv;
11396 11549
11397 if (mmio_flip->req) { 11550 if (work->flip_queued_req)
11398 WARN_ON(__i915_wait_request(mmio_flip->req, 11551 WARN_ON(__i915_wait_request(work->flip_queued_req,
11399 false, NULL, 11552 false, NULL,
11400 &mmio_flip->i915->rps.mmioflips)); 11553 &dev_priv->rps.mmioflips));
11401 i915_gem_request_unreference__unlocked(mmio_flip->req);
11402 }
11403 11554
11404 /* For framebuffer backed by dmabuf, wait for fence */ 11555 /* For framebuffer backed by dmabuf, wait for fence */
11405 if (obj->base.dma_buf) 11556 resv = i915_gem_object_get_dmabuf_resv(obj);
11406 WARN_ON(reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv, 11557 if (resv)
11407 false, false, 11558 WARN_ON(reservation_object_wait_timeout_rcu(resv, false, false,
11408 MAX_SCHEDULE_TIMEOUT) < 0); 11559 MAX_SCHEDULE_TIMEOUT) < 0);
11409 11560
11410 intel_do_mmio_flip(mmio_flip); 11561 intel_pipe_update_start(crtc);
11411 kfree(mmio_flip);
11412}
11413
11414static int intel_queue_mmio_flip(struct drm_device *dev,
11415 struct drm_crtc *crtc,
11416 struct drm_i915_gem_object *obj)
11417{
11418 struct intel_mmio_flip *mmio_flip;
11419
11420 mmio_flip = kmalloc(sizeof(*mmio_flip), GFP_KERNEL);
11421 if (mmio_flip == NULL)
11422 return -ENOMEM;
11423
11424 mmio_flip->i915 = to_i915(dev);
11425 mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
11426 mmio_flip->crtc = to_intel_crtc(crtc);
11427 mmio_flip->rotation = crtc->primary->state->rotation;
11428 11562
11429 INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func); 11563 if (INTEL_GEN(dev_priv) >= 9)
11430 schedule_work(&mmio_flip->work); 11564 skl_do_mmio_flip(crtc, work->rotation, work);
11565 else
 11566 /* use_mmio_flip() restricts MMIO flips to ilk+ */
11567 ilk_do_mmio_flip(crtc, work);
11431 11568
11432 return 0; 11569 intel_pipe_update_end(crtc, work);
11433} 11570}
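
The consolidated worker now performs the entire MMIO flip in one place: wait for rendering, wait for any imported dma-buf fence, then write the plane registers inside the vblank-evasion window. Ordering summary of the function above (comments only; step 5's completion signalling via intel_pipe_update_end(crtc, work) is inferred from the new signature):

static void mmio_flip_ordering_sketch(void)
{
	/* 1. __i915_wait_request(): rendering to the new fb must finish   */
	/* 2. reservation_object_wait_timeout_rcu(): dma-buf fences, if any */
	/* 3. intel_pipe_update_start(): enter the vblank evasion window   */
	/* 4. skl_do_mmio_flip()/ilk_do_mmio_flip(): write plane registers */
	/* 5. intel_pipe_update_end(crtc, work): leave window, complete flip */
}
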
11434 11571
11435static int intel_default_queue_flip(struct drm_device *dev, 11572static int intel_default_queue_flip(struct drm_device *dev,
@@ -11442,37 +11579,32 @@ static int intel_default_queue_flip(struct drm_device *dev,
11442 return -ENODEV; 11579 return -ENODEV;
11443} 11580}
11444 11581
11445static bool __intel_pageflip_stall_check(struct drm_device *dev, 11582static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
11446 struct drm_crtc *crtc) 11583 struct intel_crtc *intel_crtc,
11584 struct intel_flip_work *work)
11447{ 11585{
11448 struct drm_i915_private *dev_priv = dev->dev_private; 11586 u32 addr, vblank;
11449 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11450 struct intel_unpin_work *work = intel_crtc->unpin_work;
11451 u32 addr;
11452
11453 if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE)
11454 return true;
11455 11587
11456 if (atomic_read(&work->pending) < INTEL_FLIP_PENDING) 11588 if (!atomic_read(&work->pending))
11457 return false; 11589 return false;
11458 11590
11459 if (!work->enable_stall_check) 11591 smp_rmb();
11460 return false;
11461 11592
11593 vblank = intel_crtc_get_vblank_counter(intel_crtc);
11462 if (work->flip_ready_vblank == 0) { 11594 if (work->flip_ready_vblank == 0) {
11463 if (work->flip_queued_req && 11595 if (work->flip_queued_req &&
11464 !i915_gem_request_completed(work->flip_queued_req, true)) 11596 !i915_gem_request_completed(work->flip_queued_req, true))
11465 return false; 11597 return false;
11466 11598
11467 work->flip_ready_vblank = drm_crtc_vblank_count(crtc); 11599 work->flip_ready_vblank = vblank;
11468 } 11600 }
11469 11601
11470 if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3) 11602 if (vblank - work->flip_ready_vblank < 3)
11471 return false; 11603 return false;
11472 11604
11473 /* Potential stall - if we see that the flip has happened, 11605 /* Potential stall - if we see that the flip has happened,
11474 * assume a missed interrupt. */ 11606 * assume a missed interrupt. */
11475 if (INTEL_INFO(dev)->gen >= 4) 11607 if (INTEL_GEN(dev_priv) >= 4)
11476 addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane))); 11608 addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
11477 else 11609 else
11478 addr = I915_READ(DSPADDR(intel_crtc->plane)); 11610 addr = I915_READ(DSPADDR(intel_crtc->plane));
@@ -11484,12 +11616,12 @@ static bool __intel_pageflip_stall_check(struct drm_device *dev,
11484 return addr == work->gtt_offset; 11616 return addr == work->gtt_offset;
11485} 11617}
11486 11618
11487void intel_check_page_flip(struct drm_device *dev, int pipe) 11619void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
11488{ 11620{
11489 struct drm_i915_private *dev_priv = dev->dev_private; 11621 struct drm_device *dev = dev_priv->dev;
11490 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 11622 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
11491 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11623 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11492 struct intel_unpin_work *work; 11624 struct intel_flip_work *work;
11493 11625
11494 WARN_ON(!in_interrupt()); 11626 WARN_ON(!in_interrupt());
11495 11627
@@ -11497,19 +11629,24 @@ void intel_check_page_flip(struct drm_device *dev, int pipe)
11497 return; 11629 return;
11498 11630
11499 spin_lock(&dev->event_lock); 11631 spin_lock(&dev->event_lock);
11500 work = intel_crtc->unpin_work; 11632 work = intel_crtc->flip_work;
11501 if (work != NULL && __intel_pageflip_stall_check(dev, crtc)) { 11633
11502 WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n", 11634 if (work != NULL && !is_mmio_work(work) &&
11503 work->flip_queued_vblank, drm_vblank_count(dev, pipe)); 11635 __pageflip_stall_check_cs(dev_priv, intel_crtc, work)) {
11636 WARN_ONCE(1,
11637 "Kicking stuck page flip: queued at %d, now %d\n",
11638 work->flip_queued_vblank, intel_crtc_get_vblank_counter(intel_crtc));
11504 page_flip_completed(intel_crtc); 11639 page_flip_completed(intel_crtc);
11505 work = NULL; 11640 work = NULL;
11506 } 11641 }
11507 if (work != NULL && 11642
11508 drm_vblank_count(dev, pipe) - work->flip_queued_vblank > 1) 11643 if (work != NULL && !is_mmio_work(work) &&
11509 intel_queue_rps_boost_for_request(dev, work->flip_queued_req); 11644 intel_crtc_get_vblank_counter(intel_crtc) - work->flip_queued_vblank > 1)
+		intel_queue_rps_boost_for_request(work->flip_queued_req);
 	spin_unlock(&dev->event_lock);
 }
 
+__maybe_unused
 static int intel_crtc_page_flip(struct drm_crtc *crtc,
 				struct drm_framebuffer *fb,
 				struct drm_pending_vblank_event *event,
@@ -11522,7 +11659,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct drm_plane *primary = crtc->primary;
 	enum pipe pipe = intel_crtc->pipe;
-	struct intel_unpin_work *work;
+	struct intel_flip_work *work;
 	struct intel_engine_cs *engine;
 	bool mmio_flip;
 	struct drm_i915_gem_request *request = NULL;
@@ -11559,19 +11696,19 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	work->event = event;
 	work->crtc = crtc;
 	work->old_fb = old_fb;
-	INIT_WORK(&work->work, intel_unpin_work_fn);
+	INIT_WORK(&work->unpin_work, intel_unpin_work_fn);
 
 	ret = drm_crtc_vblank_get(crtc);
 	if (ret)
 		goto free_work;
 
-	/* We borrow the event spin lock for protecting unpin_work */
+	/* We borrow the event spin lock for protecting flip_work */
 	spin_lock_irq(&dev->event_lock);
-	if (intel_crtc->unpin_work) {
+	if (intel_crtc->flip_work) {
 		/* Before declaring the flip queue wedged, check if
 		 * the hardware completed the operation behind our backs.
 		 */
-		if (__intel_pageflip_stall_check(dev, crtc)) {
+		if (pageflip_finished(intel_crtc, intel_crtc->flip_work)) {
 			DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
 			page_flip_completed(intel_crtc);
 		} else {
@@ -11583,7 +11720,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 			return -EBUSY;
 		}
 	}
-	intel_crtc->unpin_work = work;
+	intel_crtc->flip_work = work;
 	spin_unlock_irq(&dev->event_lock);
 
 	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
@@ -11595,7 +11732,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 	crtc->primary->fb = fb;
 	update_state_fb(crtc->primary);
-	intel_fbc_pre_update(intel_crtc);
+
+	intel_fbc_pre_update(intel_crtc, intel_crtc->config,
+			     to_intel_plane_state(primary->state));
 
 	work->pending_flip_obj = obj;
 
@@ -11638,6 +11777,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	 */
 	if (!mmio_flip) {
 		ret = i915_gem_object_sync(obj, engine, &request);
+		if (!ret && !request) {
+			request = i915_gem_request_alloc(engine, NULL);
+			ret = PTR_ERR_OR_ZERO(request);
+		}
+
 		if (ret)
 			goto cleanup_pending;
 	}
@@ -11649,38 +11793,28 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
 						  obj, 0);
 	work->gtt_offset += intel_crtc->dspaddr_offset;
+	work->rotation = crtc->primary->state->rotation;
 
 	if (mmio_flip) {
-		ret = intel_queue_mmio_flip(dev, crtc, obj);
-		if (ret)
-			goto cleanup_unpin;
+		INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
 
 		i915_gem_request_assign(&work->flip_queued_req,
 					obj->last_write_req);
-	} else {
-		if (!request) {
-			request = i915_gem_request_alloc(engine, NULL);
-			if (IS_ERR(request)) {
-				ret = PTR_ERR(request);
-				goto cleanup_unpin;
-			}
-		}
 
+		schedule_work(&work->mmio_work);
+	} else {
+		i915_gem_request_assign(&work->flip_queued_req, request);
 		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
 						   page_flip_flags);
 		if (ret)
 			goto cleanup_unpin;
 
-		i915_gem_request_assign(&work->flip_queued_req, request);
-	}
+		intel_mark_page_flip_active(intel_crtc, work);
 
-	if (request)
 		i915_add_request_no_flush(request);
+	}
 
-	work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
-	work->enable_stall_check = true;
-
-	i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
+	i915_gem_track_fb(intel_fb_obj(old_fb), obj,
 			  to_intel_plane(primary)->frontbuffer_bit);
 	mutex_unlock(&dev->struct_mutex);
 
@@ -11706,7 +11840,7 @@ cleanup:
 	drm_framebuffer_unreference(work->old_fb);
 
 	spin_lock_irq(&dev->event_lock);
-	intel_crtc->unpin_work = NULL;
+	intel_crtc->flip_work = NULL;
 	spin_unlock_irq(&dev->event_lock);
 
 	drm_crtc_vblank_put(crtc);
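The hunks above stop issuing MMIO flips synchronously and instead record the flip in intel_flip_work and push it to a work item. As a hedged, standalone sketch of that deferred-write pattern (all demo_* names are hypothetical, not part of this patch):

#include <linux/types.h>
#include <linux/workqueue.h>

struct demo_flip {
	struct work_struct mmio_work;
	u32 gtt_offset;				/* scanout base to latch */
	void (*write_base)(u32 gtt_offset);	/* MMIO write, caller-supplied */
};

static void demo_flip_work_func(struct work_struct *w)
{
	/* Runs later in process context, off the ioctl path. */
	struct demo_flip *flip = container_of(w, struct demo_flip, mmio_work);

	flip->write_base(flip->gtt_offset);
}

static void demo_queue_flip(struct demo_flip *flip)
{
	INIT_WORK(&flip->mmio_work, demo_flip_work_func);
	schedule_work(&flip->mmio_work);	/* returns without blocking */
}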
@@ -11808,12 +11942,12 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_plane_state *old_plane_state =
 		to_intel_plane_state(plane->state);
-	int idx = intel_crtc->base.base.id, ret;
 	bool mode_changed = needs_modeset(crtc_state);
 	bool was_crtc_enabled = crtc->state->active;
 	bool is_crtc_enabled = crtc_state->active;
 	bool turn_off, turn_on, visible, was_visible;
 	struct drm_framebuffer *fb = plane_state->fb;
+	int ret;
 
 	if (crtc_state && INTEL_INFO(dev)->gen >= 9 &&
 	    plane->type != DRM_PLANE_TYPE_CURSOR) {
@@ -11834,6 +11968,11 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
 	 * Visibility is calculated as if the crtc was on, but
 	 * after scaler setup everything depends on it being off
 	 * when the crtc isn't active.
+	 *
+	 * FIXME this is wrong for watermarks. Watermarks should also
+	 * be computed as if the pipe would be active. Perhaps move
+	 * per-plane wm computation to the .check_plane() hook, and
+	 * only combine the results from all planes in the current place?
 	 */
 	if (!is_crtc_enabled)
 		to_intel_plane_state(plane_state)->visible = visible = false;
@@ -11847,11 +11986,15 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
 	turn_off = was_visible && (!visible || mode_changed);
 	turn_on = visible && (!was_visible || mode_changed);
 
-	DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx,
-			 plane->base.id, fb ? fb->base.id : -1);
+	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
+			 intel_crtc->base.base.id,
+			 intel_crtc->base.name,
+			 plane->base.id, plane->name,
+			 fb ? fb->base.id : -1);
 
-	DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n",
-			 plane->base.id, was_visible, visible,
+	DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
+			 plane->base.id, plane->name,
+			 was_visible, visible,
 			 turn_off, turn_on, mode_changed);
 
 	if (turn_on) {
@@ -12007,7 +12150,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
 		}
 	} else if (dev_priv->display.compute_intermediate_wm) {
 		if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
-			pipe_config->wm.intermediate = pipe_config->wm.optimal.ilk;
+			pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
 	}
 
 	if (INTEL_INFO(dev)->gen >= 9) {
@@ -12142,7 +12285,8 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
 	struct intel_plane_state *state;
 	struct drm_framebuffer *fb;
 
-	DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id,
+	DRM_DEBUG_KMS("[CRTC:%d:%s]%s config %p for pipe %c\n",
+		      crtc->base.base.id, crtc->base.name,
 		      context, pipe_config, pipe_name(crtc->pipe));
 
 	DRM_DEBUG_KMS("cpu_transcoder: %s\n", transcoder_name(pipe_config->cpu_transcoder));
@@ -12243,29 +12387,24 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
 		state = to_intel_plane_state(plane->state);
 		fb = state->base.fb;
 		if (!fb) {
-			DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d "
-				      "disabled, scaler_id = %d\n",
-				      plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
-				      plane->base.id, intel_plane->pipe,
-				      (crtc->base.primary == plane) ? 0 : intel_plane->plane + 1,
-				      drm_plane_index(plane), state->scaler_id);
+			DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
+				      plane->base.id, plane->name, state->scaler_id);
 			continue;
 		}
 
-		DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d enabled",
-			      plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
-			      plane->base.id, intel_plane->pipe,
-			      crtc->base.primary == plane ? 0 : intel_plane->plane + 1,
-			      drm_plane_index(plane));
-		DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = 0x%x",
-			      fb->base.id, fb->width, fb->height, fb->pixel_format);
-		DRM_DEBUG_KMS("\tscaler:%d src (%u, %u) %ux%u dst (%u, %u) %ux%u\n",
-			      state->scaler_id,
-			      state->src.x1 >> 16, state->src.y1 >> 16,
-			      drm_rect_width(&state->src) >> 16,
-			      drm_rect_height(&state->src) >> 16,
-			      state->dst.x1, state->dst.y1,
-			      drm_rect_width(&state->dst), drm_rect_height(&state->dst));
+		DRM_DEBUG_KMS("[PLANE:%d:%s] enabled",
+			      plane->base.id, plane->name);
+		DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = %s",
+			      fb->base.id, fb->width, fb->height,
+			      drm_get_format_name(fb->pixel_format));
+		DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
+			      state->scaler_id,
+			      state->src.x1 >> 16, state->src.y1 >> 16,
+			      drm_rect_width(&state->src) >> 16,
+			      drm_rect_height(&state->src) >> 16,
+			      state->dst.x1, state->dst.y1,
+			      drm_rect_width(&state->dst),
+			      drm_rect_height(&state->dst));
 	}
 }
@@ -12684,6 +12823,7 @@ intel_pipe_config_compare(struct drm_device *dev,
 
 	PIPE_CONF_CHECK_I(has_dp_encoder);
 	PIPE_CONF_CHECK_I(lane_count);
+	PIPE_CONF_CHECK_X(lane_lat_optim_mask);
 
 	if (INTEL_INFO(dev)->gen < 8) {
 		PIPE_CONF_CHECK_M_N(dp_m_n);
@@ -12932,7 +13072,7 @@ verify_crtc_state(struct drm_crtc *crtc,
 	pipe_config->base.crtc = crtc;
 	pipe_config->base.state = old_state;
 
-	DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+	DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
 
 	active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);
 
@@ -13280,6 +13420,9 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
 			intel_state->active_crtcs |= 1 << i;
 		else
 			intel_state->active_crtcs &= ~(1 << i);
+
+		if (crtc_state->active != crtc->state->active)
+			intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
 	}
 
 	/*
@@ -13290,9 +13433,17 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
 	 * adjusted_mode bits in the crtc directly.
 	 */
 	if (dev_priv->display.modeset_calc_cdclk) {
+		if (!intel_state->cdclk_pll_vco)
+			intel_state->cdclk_pll_vco = dev_priv->cdclk_pll.vco;
+		if (!intel_state->cdclk_pll_vco)
+			intel_state->cdclk_pll_vco = dev_priv->skl_preferred_vco_freq;
+
 		ret = dev_priv->display.modeset_calc_cdclk(state);
+		if (ret < 0)
+			return ret;
 
-		if (!ret && intel_state->dev_cdclk != dev_priv->cdclk_freq)
+		if (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
+		    intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco)
 			ret = intel_modeset_all_pipes(state);
 
 		if (ret < 0)
@@ -13316,38 +13467,16 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
  * phase. The code here should be run after the per-crtc and per-plane 'check'
  * handlers to ensure that all derived state has been updated.
  */
-static void calc_watermark_data(struct drm_atomic_state *state)
+static int calc_watermark_data(struct drm_atomic_state *state)
 {
 	struct drm_device *dev = state->dev;
-	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-	struct drm_crtc *crtc;
-	struct drm_crtc_state *cstate;
-	struct drm_plane *plane;
-	struct drm_plane_state *pstate;
-
-	/*
-	 * Calculate watermark configuration details now that derived
-	 * plane/crtc state is all properly updated.
-	 */
-	drm_for_each_crtc(crtc, dev) {
-		cstate = drm_atomic_get_existing_crtc_state(state, crtc) ?:
-			crtc->state;
-
-		if (cstate->active)
-			intel_state->wm_config.num_pipes_active++;
-	}
-	drm_for_each_legacy_plane(plane, dev) {
-		pstate = drm_atomic_get_existing_plane_state(state, plane) ?:
-			plane->state;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
-		if (!to_intel_plane_state(pstate)->visible)
-			continue;
+	/* Is there platform-specific watermark information to calculate? */
+	if (dev_priv->display.compute_global_watermarks)
+		return dev_priv->display.compute_global_watermarks(state);
 
-		intel_state->wm_config.sprites_enabled = true;
-		if (pstate->crtc_w != pstate->src_w >> 16 ||
-		    pstate->crtc_h != pstate->src_h >> 16)
-			intel_state->wm_config.sprites_scaled = true;
-	}
+	return 0;
 }
 
 /**
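calc_watermark_data() now just delegates to an optional per-platform hook. A minimal sketch of that hook-with-default shape, assuming hypothetical demo_* types:

struct demo_display_funcs {
	/* NULL on platforms without global watermark tracking */
	int (*compute_global_watermarks)(void *state);
};

static int demo_calc_watermarks(const struct demo_display_funcs *funcs,
				void *state)
{
	if (funcs->compute_global_watermarks)
		return funcs->compute_global_watermarks(state);

	return 0;	/* nothing to do counts as success, as above */
}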
@@ -13377,14 +13506,13 @@ static int intel_atomic_check(struct drm_device *dev,
 		if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
 			crtc_state->mode_changed = true;
 
-		if (!crtc_state->enable) {
-			if (needs_modeset(crtc_state))
-				any_ms = true;
+		if (!needs_modeset(crtc_state))
 			continue;
-		}
 
-		if (!needs_modeset(crtc_state))
+		if (!crtc_state->enable) {
+			any_ms = true;
 			continue;
+		}
 
 		/* FIXME: For only active_changed we shouldn't need to do any
 		 * state recomputation at all. */
@@ -13394,8 +13522,11 @@ static int intel_atomic_check(struct drm_device *dev,
 			return ret;
 
 		ret = intel_modeset_pipe_config(crtc, pipe_config);
-		if (ret)
+		if (ret) {
+			intel_dump_pipe_config(to_intel_crtc(crtc),
+					       pipe_config, "[failed]");
 			return ret;
+		}
 
 		if (i915.fastboot &&
 		    intel_pipe_config_compare(dev,
@@ -13405,13 +13536,12 @@ static int intel_atomic_check(struct drm_device *dev,
 			to_intel_crtc_state(crtc_state)->update_pipe = true;
 		}
 
-		if (needs_modeset(crtc_state)) {
+		if (needs_modeset(crtc_state))
 			any_ms = true;
 
-			ret = drm_atomic_add_affected_planes(state, crtc);
-			if (ret)
-				return ret;
-		}
+		ret = drm_atomic_add_affected_planes(state, crtc);
+		if (ret)
+			return ret;
 
 		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
 				       needs_modeset(crtc_state) ?
@@ -13431,9 +13561,7 @@ static int intel_atomic_check(struct drm_device *dev,
 		return ret;
 
 	intel_fbc_choose_crtc(dev_priv, state);
-	calc_watermark_data(state);
-
-	return 0;
+	return calc_watermark_data(state);
 }
 
 static int intel_atomic_prepare_commit(struct drm_device *dev,
@@ -13447,11 +13575,6 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
 	struct drm_crtc *crtc;
 	int i, ret;
 
-	if (nonblock) {
-		DRM_DEBUG_KMS("i915 does not yet support nonblocking commit\n");
-		return -EINVAL;
-	}
-
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
 		if (state->legacy_cursor_update)
 			continue;
@@ -13495,6 +13618,16 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
 	return ret;
 }
 
+u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+
+	if (!dev->max_vblank_count)
+		return drm_accurate_vblank_count(&crtc->base);
+
+	return dev->driver->get_vblank_counter(dev, crtc->pipe);
+}
+
 static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
 					  struct drm_i915_private *dev_priv,
 					  unsigned crtc_mask)
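intel_crtc_get_vblank_counter() prefers the hardware frame counter and falls back to the software-tracked count when dev->max_vblank_count is zero. The same two-path shape as a standalone sketch (demo_* names hypothetical):

#include <linux/types.h>

struct demo_vblank_source {
	u32 max_hw_count;	/* 0 means no usable hardware counter */
	u32 (*read_hw)(void);
	u32 (*read_sw)(void);	/* e.g. interpolated from timestamps */
};

static u32 demo_get_vblank_counter(const struct demo_vblank_source *src)
{
	if (!src->max_hw_count)
		return src->read_sw();

	return src->read_hw();
}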
@@ -13560,45 +13693,36 @@ static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
 	return false;
 }
 
-/**
- * intel_atomic_commit - commit validated state object
- * @dev: DRM device
- * @state: the top-level driver state object
- * @nonblock: nonblocking commit
- *
- * This function commits a top-level state object that has been validated
- * with drm_atomic_helper_check().
- *
- * FIXME: Atomic modeset support for i915 is not yet complete. At the moment
- * we can only handle plane-related operations and do not yet support
- * nonblocking commit.
- *
- * RETURNS
- * Zero for success or -errno.
- */
-static int intel_atomic_commit(struct drm_device *dev,
-			       struct drm_atomic_state *state,
-			       bool nonblock)
+static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 {
+	struct drm_device *dev = state->dev;
 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc_state *old_crtc_state;
 	struct drm_crtc *crtc;
 	struct intel_crtc_state *intel_cstate;
-	int ret = 0, i;
+	struct drm_plane *plane;
+	struct drm_plane_state *plane_state;
 	bool hw_check = intel_state->modeset;
 	unsigned long put_domains[I915_MAX_PIPES] = {};
 	unsigned crtc_vblank_mask = 0;
+	int i, ret;
 
-	ret = intel_atomic_prepare_commit(dev, state, nonblock);
-	if (ret) {
-		DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
-		return ret;
+	for_each_plane_in_state(state, plane, plane_state, i) {
+		struct intel_plane_state *intel_plane_state =
+			to_intel_plane_state(plane_state);
+
+		if (!intel_plane_state->wait_req)
+			continue;
+
+		ret = __i915_wait_request(intel_plane_state->wait_req,
+					  true, NULL, NULL);
+		/* EIO should be eaten, and we can't get interrupted in the
+		 * worker, and blocking commits have waited already. */
+		WARN_ON(ret);
 	}
 
-	drm_atomic_helper_swap_state(dev, state);
-	dev_priv->wm.config = intel_state->wm_config;
-	intel_shared_dpll_commit(state);
+	drm_atomic_helper_wait_for_dependencies(state);
 
 	if (intel_state->modeset) {
 		memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
@@ -13653,7 +13777,8 @@ static int intel_atomic_commit(struct drm_device *dev,
 	drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
 
 	if (dev_priv->display.modeset_commit_cdclk &&
-	    intel_state->dev_cdclk != dev_priv->cdclk_freq)
+	    (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
+	     intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco))
 		dev_priv->display.modeset_commit_cdclk(state);
 
 	intel_modeset_verify_disabled(dev);
@@ -13665,30 +13790,44 @@ static int intel_atomic_commit(struct drm_device *dev,
 		bool modeset = needs_modeset(crtc->state);
 		struct intel_crtc_state *pipe_config =
 			to_intel_crtc_state(crtc->state);
-		bool update_pipe = !modeset && pipe_config->update_pipe;
 
 		if (modeset && crtc->state->active) {
 			update_scanline_offset(to_intel_crtc(crtc));
 			dev_priv->display.crtc_enable(crtc);
 		}
 
+		/* Complete events for now-disabled pipes here. */
+		if (modeset && !crtc->state->active && crtc->state->event) {
+			spin_lock_irq(&dev->event_lock);
+			drm_crtc_send_vblank_event(crtc, crtc->state->event);
+			spin_unlock_irq(&dev->event_lock);
+
+			crtc->state->event = NULL;
+		}
+
 		if (!modeset)
 			intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
 
 		if (crtc->state->active &&
 		    drm_atomic_get_existing_plane_state(state, crtc->primary))
-			intel_fbc_enable(intel_crtc);
+			intel_fbc_enable(intel_crtc, pipe_config, to_intel_plane_state(crtc->primary->state));
 
-		if (crtc->state->active &&
-		    (crtc->state->planes_changed || update_pipe))
+		if (crtc->state->active)
 			drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
 
 		if (pipe_config->base.active && needs_vblank_wait(pipe_config))
 			crtc_vblank_mask |= 1 << i;
 	}
 
-	/* FIXME: add subpixel order */
-
+	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
+	 * already, but still need the state for the delayed optimization. To
	 * fix this:
+	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
+	 * - schedule that vblank worker _before_ calling hw_done
+	 * - at the start of commit_tail, cancel it _synchronously_
+	 * - switch over to the vblank wait helper in the core after that since
+	 *   we don't need our special handling any more.
+	 */
 	if (!state->legacy_cursor_update)
 		intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);
 
@@ -13715,6 +13854,8 @@ static int intel_atomic_commit(struct drm_device *dev,
 		intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
 	}
 
+	drm_atomic_helper_commit_hw_done(state);
+
 	if (intel_state->modeset)
 		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
 
@@ -13722,6 +13863,8 @@ static int intel_atomic_commit(struct drm_device *dev,
 	drm_atomic_helper_cleanup_planes(dev, state);
 	mutex_unlock(&dev->struct_mutex);
 
+	drm_atomic_helper_commit_cleanup_done(state);
+
 	drm_atomic_state_free(state);
 
 	/* As one of the primary mmio accessors, KMS has a high likelihood
@@ -13736,6 +13879,86 @@ static int intel_atomic_commit(struct drm_device *dev,
 	 * can happen also when the device is completely off.
 	 */
 	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
+}
+
+static void intel_atomic_commit_work(struct work_struct *work)
+{
+	struct drm_atomic_state *state = container_of(work,
+						      struct drm_atomic_state,
+						      commit_work);
+	intel_atomic_commit_tail(state);
+}
+
+static void intel_atomic_track_fbs(struct drm_atomic_state *state)
+{
+	struct drm_plane_state *old_plane_state;
+	struct drm_plane *plane;
+	struct drm_i915_gem_object *obj, *old_obj;
+	struct intel_plane *intel_plane;
+	int i;
+
+	mutex_lock(&state->dev->struct_mutex);
+	for_each_plane_in_state(state, plane, old_plane_state, i) {
+		obj = intel_fb_obj(plane->state->fb);
+		old_obj = intel_fb_obj(old_plane_state->fb);
+		intel_plane = to_intel_plane(plane);
+
+		i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
+	}
+	mutex_unlock(&state->dev->struct_mutex);
+}
+
+/**
+ * intel_atomic_commit - commit validated state object
+ * @dev: DRM device
+ * @state: the top-level driver state object
+ * @nonblock: nonblocking commit
+ *
+ * This function commits a top-level state object that has been validated
+ * with drm_atomic_helper_check().
+ *
+ * FIXME: Atomic modeset support for i915 is not yet complete. At the moment
+ * nonblocking commits are only safe for pure plane updates. Everything else
+ * should work though.
+ *
+ * RETURNS
+ * Zero for success or -errno.
+ */
+static int intel_atomic_commit(struct drm_device *dev,
+			       struct drm_atomic_state *state,
+			       bool nonblock)
+{
+	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret = 0;
+
+	if (intel_state->modeset && nonblock) {
+		DRM_DEBUG_KMS("nonblocking commit for modeset not yet implemented.\n");
+		return -EINVAL;
+	}
+
+	ret = drm_atomic_helper_setup_commit(state, nonblock);
+	if (ret)
+		return ret;
+
+	INIT_WORK(&state->commit_work, intel_atomic_commit_work);
+
+	ret = intel_atomic_prepare_commit(dev, state, nonblock);
+	if (ret) {
+		DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
+		return ret;
+	}
+
+	drm_atomic_helper_swap_state(state, true);
+	dev_priv->wm.distrust_bios_wm = false;
+	dev_priv->wm.skl_results = intel_state->wm_results;
+	intel_shared_dpll_commit(state);
+	intel_atomic_track_fbs(state);
+
+	if (nonblock)
+		queue_work(system_unbound_wq, &state->commit_work);
+	else
+		intel_atomic_commit_tail(state);
 
 	return 0;
 }
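intel_atomic_commit() now splits into a synchronous phase (validation, swap) and a tail that either runs inline or on system_unbound_wq for a nonblocking commit. A reduced sketch of that split, with hypothetical demo_* names and none of the real fencing:

#include <linux/workqueue.h>

struct demo_commit {
	struct work_struct commit_work;
	void (*commit_tail)(struct demo_commit *commit);	/* hw programming */
};

static void demo_commit_work(struct work_struct *work)
{
	struct demo_commit *commit =
		container_of(work, struct demo_commit, commit_work);

	commit->commit_tail(commit);
}

static int demo_commit(struct demo_commit *commit, bool nonblock)
{
	/* State is already swapped at this point, so callers may proceed. */
	INIT_WORK(&commit->commit_work, demo_commit_work);

	if (nonblock)
		queue_work(system_unbound_wq, &commit->commit_work);
	else
		commit->commit_tail(commit);

	return 0;
}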
@@ -13749,8 +13972,8 @@ void intel_crtc_restore_mode(struct drm_crtc *crtc)
 
 	state = drm_atomic_state_alloc(dev);
 	if (!state) {
-		DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory",
-			      crtc->base.id);
+		DRM_DEBUG_KMS("[CRTC:%d:%s] crtc restore failed, out of memory",
+			      crtc->base.id, crtc->name);
 		return;
 	}
 
@@ -13785,7 +14008,7 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
 	.set_config = drm_atomic_helper_set_config,
 	.set_property = drm_atomic_helper_crtc_set_property,
 	.destroy = intel_crtc_destroy,
-	.page_flip = intel_crtc_page_flip,
+	.page_flip = drm_atomic_helper_page_flip,
 	.atomic_duplicate_state = intel_crtc_duplicate_state,
 	.atomic_destroy_state = intel_crtc_destroy_state,
 };
@@ -13810,9 +14033,9 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 {
 	struct drm_device *dev = plane->dev;
 	struct drm_framebuffer *fb = new_state->fb;
-	struct intel_plane *intel_plane = to_intel_plane(plane);
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
+	struct reservation_object *resv;
 	int ret = 0;
 
 	if (!obj && !old_obj)
@@ -13842,12 +14065,15 @@
 		}
 	}
 
+	if (!obj)
+		return 0;
+
 	/* For framebuffer backed by dmabuf, wait for fence */
-	if (obj && obj->base.dma_buf) {
+	resv = i915_gem_object_get_dmabuf_resv(obj);
+	if (resv) {
 		long lret;
 
-		lret = reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
-							   false, true,
+		lret = reservation_object_wait_timeout_rcu(resv, false, true,
 							   MAX_SCHEDULE_TIMEOUT);
 		if (lret == -ERESTARTSYS)
 			return lret;
@@ -13855,9 +14081,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 		WARN(lret < 0, "waiting returns %li\n", lret);
 	}
 
-	if (!obj) {
-		ret = 0;
-	} else if (plane->type == DRM_PLANE_TYPE_CURSOR &&
+	if (plane->type == DRM_PLANE_TYPE_CURSOR &&
 	    INTEL_INFO(dev)->cursor_needs_physical) {
 		int align = IS_I830(dev) ? 16 * 1024 : 256;
 		ret = i915_gem_object_attach_phys(obj, align);
@@ -13868,15 +14092,11 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 	}
 
 	if (ret == 0) {
-		if (obj) {
-			struct intel_plane_state *plane_state =
-				to_intel_plane_state(new_state);
-
-			i915_gem_request_assign(&plane_state->wait_req,
-						obj->last_write_req);
-		}
-
-		i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
+		struct intel_plane_state *plane_state =
+			to_intel_plane_state(new_state);
+
+		i915_gem_request_assign(&plane_state->wait_req,
+					obj->last_write_req);
 	}
 
 	return ret;
@@ -13896,7 +14116,6 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
 			const struct drm_plane_state *old_state)
 {
 	struct drm_device *dev = plane->dev;
-	struct intel_plane *intel_plane = to_intel_plane(plane);
 	struct intel_plane_state *old_intel_state;
 	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
 	struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
@@ -13910,11 +14129,6 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
 	    !INTEL_INFO(dev)->cursor_needs_physical))
 		intel_unpin_fb_obj(old_state->fb, old_state->rotation);
 
-	/* prepare_fb aborted? */
-	if ((old_obj && (old_obj->frontbuffer_bits & intel_plane->frontbuffer_bit)) ||
-	    (obj && !(obj->frontbuffer_bits & intel_plane->frontbuffer_bit)))
-		i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
-
 	i915_gem_request_assign(&old_intel_state->wait_req, NULL);
 }
 
@@ -13970,6 +14184,7 @@ intel_check_primary_plane(struct drm_plane *plane,
 
 	return drm_plane_helper_check_update(plane, crtc, fb, &state->src,
 					     &state->dst, &state->clip,
+					     state->base.rotation,
 					     min_scale, max_scale,
 					     can_position, true,
 					     &state->visible);
@@ -14006,7 +14221,7 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc,
 {
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-	intel_pipe_update_end(intel_crtc);
+	intel_pipe_update_end(intel_crtc, NULL);
 }
 
 /**
@@ -14018,9 +14233,11 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc,
 */
void intel_plane_destroy(struct drm_plane *plane)
{
-	struct intel_plane *intel_plane = to_intel_plane(plane);
+	if (!plane)
+		return;
+
 	drm_plane_cleanup(plane);
-	kfree(intel_plane);
+	kfree(to_intel_plane(plane));
}
 
const struct drm_plane_funcs intel_plane_funcs = {
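intel_plane_destroy() now tolerates NULL so error paths can call it unconditionally. The same guard as a tiny sketch (demo_* types hypothetical); note that kfree() already accepts NULL, so the early return protects the cleanup step that does not:

#include <linux/slab.h>

struct demo_plane {
	int id;
};

static void demo_plane_cleanup(struct demo_plane *plane)
{
	/* stand-in for drm_plane_cleanup(), which must not see NULL */
}

static void demo_plane_destroy(struct demo_plane *plane)
{
	if (!plane)
		return;

	demo_plane_cleanup(plane);
	kfree(plane);
}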
@@ -14092,10 +14309,24 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
 		primary->disable_plane = i9xx_disable_primary_plane;
 	}
 
-	ret = drm_universal_plane_init(dev, &primary->base, 0,
-				       &intel_plane_funcs,
-				       intel_primary_formats, num_formats,
-				       DRM_PLANE_TYPE_PRIMARY, NULL);
+	if (INTEL_INFO(dev)->gen >= 9)
+		ret = drm_universal_plane_init(dev, &primary->base, 0,
+					       &intel_plane_funcs,
+					       intel_primary_formats, num_formats,
+					       DRM_PLANE_TYPE_PRIMARY,
+					       "plane 1%c", pipe_name(pipe));
+	else if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+		ret = drm_universal_plane_init(dev, &primary->base, 0,
+					       &intel_plane_funcs,
+					       intel_primary_formats, num_formats,
+					       DRM_PLANE_TYPE_PRIMARY,
+					       "primary %c", pipe_name(pipe));
+	else
+		ret = drm_universal_plane_init(dev, &primary->base, 0,
+					       &intel_plane_funcs,
+					       intel_primary_formats, num_formats,
+					       DRM_PLANE_TYPE_PRIMARY,
+					       "plane %c", plane_name(primary->plane));
 	if (ret)
 		goto fail;
 
@@ -14145,6 +14376,7 @@ intel_check_cursor_plane(struct drm_plane *plane,
 
 	ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src,
 					    &state->dst, &state->clip,
+					    state->base.rotation,
 					    DRM_PLANE_HELPER_NO_SCALING,
 					    DRM_PLANE_HELPER_NO_SCALING,
 					    true, true, &state->visible);
@@ -14253,7 +14485,8 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
 				       &intel_plane_funcs,
 				       intel_cursor_formats,
 				       ARRAY_SIZE(intel_cursor_formats),
-				       DRM_PLANE_TYPE_CURSOR, NULL);
+				       DRM_PLANE_TYPE_CURSOR,
+				       "cursor %c", pipe_name(pipe));
 	if (ret)
 		goto fail;
 
@@ -14338,7 +14571,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
 		goto fail;
 
 	ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
-					cursor, &intel_crtc_funcs, NULL);
+					cursor, &intel_crtc_funcs,
+					"pipe %c", pipe_name(pipe));
 	if (ret)
 		goto fail;
 
@@ -14372,10 +14606,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
 	return;
 
fail:
-	if (primary)
-		drm_plane_cleanup(primary);
-	if (cursor)
-		drm_plane_cleanup(cursor);
+	intel_plane_destroy(primary);
+	intel_plane_destroy(cursor);
 	kfree(crtc_state);
 	kfree(intel_crtc);
 }
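The init calls above now receive a printf-style name ("plane 1A", "cursor B", "pipe C", ...) instead of NULL. A sketch of how such a variadic name parameter is typically consumed on the callee side (demo_* names hypothetical):

#include <linux/kernel.h>
#include <stdarg.h>

static void demo_set_name(char *buf, size_t len, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vsnprintf(buf, len, fmt, args);	/* e.g. fmt = "pipe %c" */
	va_end(args);
}

Usage, one name per pipe: demo_set_name(name, sizeof(name), "pipe %c", 'A' + pipe);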
@@ -14554,6 +14786,8 @@ static void intel_setup_outputs(struct drm_device *dev)
 		if (I915_READ(PCH_DP_D) & DP_DETECTED)
 			intel_dp_init(dev, PCH_DP_D, PORT_D);
 	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+		bool has_edp, has_port;
+
 		/*
 		 * The DP_DETECTED bit is the latched state of the DDC
 		 * SDA pin at boot. However since eDP doesn't require DDC
@@ -14562,27 +14796,37 @@ static void intel_setup_outputs(struct drm_device *dev)
 		 * Thus we can't rely on the DP_DETECTED bit alone to detect
 		 * eDP ports. Consult the VBT as well as DP_DETECTED to
 		 * detect eDP ports.
+		 *
+		 * Sadly the straps seem to be missing sometimes even for HDMI
+		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
+		 * and VBT for the presence of the port. Additionally we can't
+		 * trust the port type the VBT declares as we've seen at least
+		 * HDMI ports that the VBT claims are DP or eDP.
 		 */
-		if (I915_READ(VLV_HDMIB) & SDVO_DETECTED &&
-		    !intel_dp_is_edp(dev, PORT_B))
+		has_edp = intel_dp_is_edp(dev, PORT_B);
+		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
+		if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
+			has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
+		if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
 			intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
-		if (I915_READ(VLV_DP_B) & DP_DETECTED ||
-		    intel_dp_is_edp(dev, PORT_B))
-			intel_dp_init(dev, VLV_DP_B, PORT_B);
 
-		if (I915_READ(VLV_HDMIC) & SDVO_DETECTED &&
-		    !intel_dp_is_edp(dev, PORT_C))
+		has_edp = intel_dp_is_edp(dev, PORT_C);
+		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
+		if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
+			has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
+		if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
 			intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
-		if (I915_READ(VLV_DP_C) & DP_DETECTED ||
-		    intel_dp_is_edp(dev, PORT_C))
-			intel_dp_init(dev, VLV_DP_C, PORT_C);
 
 		if (IS_CHERRYVIEW(dev)) {
-			/* eDP not supported on port D, so don't check VBT */
-			if (I915_READ(CHV_HDMID) & SDVO_DETECTED)
-				intel_hdmi_init(dev, CHV_HDMID, PORT_D);
-			if (I915_READ(CHV_DP_D) & DP_DETECTED)
+			/*
+			 * eDP not supported on port D,
+			 * so no need to worry about it
+			 */
+			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
+			if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
 				intel_dp_init(dev, CHV_DP_D, PORT_D);
+			if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
+				intel_hdmi_init(dev, CHV_HDMID, PORT_D);
 		}
 
 		intel_dsi_init(dev);
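The VLV/CHV hunk treats a port as present when either the hardware strap or the VBT says so, and only registers HDMI when DP/eDP did not claim the port. The decision logic in isolation (demo_* names hypothetical; init_dp() returning whether eDP bound mirrors the new intel_dp_init() return value):

#include <linux/types.h>

struct demo_port {
	bool strap_detected;	/* latched strap/DDC bit, may be missing */
	bool in_vbt;		/* VBT lists the port */
	bool vbt_edp;		/* VBT claims it is eDP (may lie) */
};

static void demo_register_port(const struct demo_port *p,
			       bool (*init_dp)(void),
			       void (*init_hdmi)(void))
{
	bool has_edp = p->vbt_edp;
	bool has_port = p->in_vbt;

	if (p->strap_detected || has_port)
		has_edp &= init_dp();
	if ((p->strap_detected || has_port) && !has_edp)
		init_hdmi();
}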
@@ -15050,12 +15294,13 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
 		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
 	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
 		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
-		if (IS_BROADWELL(dev_priv)) {
-			dev_priv->display.modeset_commit_cdclk =
-				broadwell_modeset_commit_cdclk;
-			dev_priv->display.modeset_calc_cdclk =
-				broadwell_modeset_calc_cdclk;
-		}
+	}
+
+	if (IS_BROADWELL(dev_priv)) {
+		dev_priv->display.modeset_commit_cdclk =
+			broadwell_modeset_commit_cdclk;
+		dev_priv->display.modeset_calc_cdclk =
+			broadwell_modeset_calc_cdclk;
 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		dev_priv->display.modeset_commit_cdclk =
 			valleyview_modeset_commit_cdclk;
@@ -15063,9 +15308,14 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
 			valleyview_modeset_calc_cdclk;
 	} else if (IS_BROXTON(dev_priv)) {
 		dev_priv->display.modeset_commit_cdclk =
-			broxton_modeset_commit_cdclk;
+			bxt_modeset_commit_cdclk;
+		dev_priv->display.modeset_calc_cdclk =
+			bxt_modeset_calc_cdclk;
+	} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+		dev_priv->display.modeset_commit_cdclk =
+			skl_modeset_commit_cdclk;
 		dev_priv->display.modeset_calc_cdclk =
-			broxton_modeset_calc_cdclk;
+			skl_modeset_calc_cdclk;
 	}
 
 	switch (INTEL_INFO(dev_priv)->gen) {
@@ -15293,7 +15543,7 @@ void intel_modeset_init_hw(struct drm_device *dev)
 	dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
 
 	intel_init_clock_gating(dev);
-	intel_enable_gt_powersave(dev);
+	intel_enable_gt_powersave(dev_priv);
 }
 
 /*
@@ -15363,7 +15613,6 @@ retry:
 	}
 
 	/* Write calculated watermark values back */
-	to_i915(dev)->wm.config = to_intel_atomic_state(state)->wm_config;
 	for_each_crtc_in_state(state, crtc, cstate, i) {
 		struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
 
@@ -15461,11 +15710,13 @@ void intel_modeset_init(struct drm_device *dev)
 	}
 
 	intel_update_czclk(dev_priv);
-	intel_update_rawclk(dev_priv);
 	intel_update_cdclk(dev);
 
 	intel_shared_dpll_init(dev);
 
+	if (dev_priv->max_cdclk_freq == 0)
+		intel_update_max_cdclk(dev);
+
 	/* Just disable it once at startup */
 	i915_disable_vga(dev);
 	intel_setup_outputs(dev);
@@ -15606,8 +15857,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
 	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
 		bool plane;
 
-		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
-			      crtc->base.base.id);
+		DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n",
+			      crtc->base.base.id, crtc->base.name);
 
 		/* Pipe has the wrong plane attached and the plane is active.
 		 * Temporarily change the plane mapping and disable everything
@@ -15775,26 +16026,24 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 		if (crtc_state->base.active) {
 			dev_priv->active_crtcs |= 1 << crtc->pipe;
 
-			if (IS_BROADWELL(dev_priv)) {
+			if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
 				pixclk = ilk_pipe_pixel_rate(crtc_state);
-
-				/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
-				if (crtc_state->ips_enabled)
-					pixclk = DIV_ROUND_UP(pixclk * 100, 95);
-			} else if (IS_VALLEYVIEW(dev_priv) ||
-				   IS_CHERRYVIEW(dev_priv) ||
-				   IS_BROXTON(dev_priv))
+			else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 				pixclk = crtc_state->base.adjusted_mode.crtc_clock;
 			else
 				WARN_ON(dev_priv->display.modeset_calc_cdclk);
+
+			/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+			if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
+				pixclk = DIV_ROUND_UP(pixclk * 100, 95);
 		}
 
 		dev_priv->min_pixclk[crtc->pipe] = pixclk;
 
 		readout_plane_state(crtc);
 
-		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
-			      crtc->base.base.id,
+		DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
+			      crtc->base.base.id, crtc->base.name,
 			      crtc->active ? "enabled" : "disabled");
 	}
 
@@ -16025,15 +16274,16 @@ retry:
 
 void intel_modeset_gem_init(struct drm_device *dev)
 {
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *c;
 	struct drm_i915_gem_object *obj;
 	int ret;
 
-	intel_init_gt_powersave(dev);
+	intel_init_gt_powersave(dev_priv);
 
 	intel_modeset_init_hw(dev);
 
-	intel_setup_overlay(dev);
+	intel_setup_overlay(dev_priv);
 
 	/*
 	 * Make sure any fbs we allocated at startup are properly
@@ -16063,22 +16313,19 @@ void intel_modeset_gem_init(struct drm_device *dev)
 	intel_backlight_register(dev);
 }
 
-void intel_connector_unregister(struct intel_connector *intel_connector)
+void intel_connector_unregister(struct drm_connector *connector)
 {
-	struct drm_connector *connector = &intel_connector->base;
+	struct intel_connector *intel_connector = to_intel_connector(connector);
 
+	intel_backlight_device_unregister(intel_connector);
 	intel_panel_destroy_backlight(connector);
-	drm_connector_unregister(connector);
 }
 
 void intel_modeset_cleanup(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_connector *connector;
-
-	intel_disable_gt_powersave(dev);
 
-	intel_backlight_unregister(dev);
+	intel_disable_gt_powersave(dev_priv);
 
 	/*
 	 * Interrupts and polling as the first thing to avoid creating havoc.
@@ -16100,27 +16347,17 @@ void intel_modeset_cleanup(struct drm_device *dev)
 	/* flush any delayed tasks or pending work */
 	flush_scheduled_work();
 
-	/* destroy the backlight and sysfs files before encoders/connectors */
-	for_each_intel_connector(dev, connector)
-		connector->unregister(connector);
+	drm_connector_unregister_all(dev);
 
 	drm_mode_config_cleanup(dev);
 
-	intel_cleanup_overlay(dev);
+	intel_cleanup_overlay(dev_priv);
 
-	intel_cleanup_gt_powersave(dev);
+	intel_cleanup_gt_powersave(dev_priv);
 
 	intel_teardown_gmbus(dev);
 }
 
-/*
- * Return which encoder is currently attached for connector.
- */
-struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
-{
-	return &intel_attached_encoder(connector)->base;
-}
-
 void intel_connector_attach_encoder(struct intel_connector *connector,
 				    struct intel_encoder *encoder)
 {
@@ -16204,9 +16441,8 @@ struct intel_display_error_state {
 };
 
 struct intel_display_error_state *
-intel_display_capture_error_state(struct drm_device *dev)
+intel_display_capture_error_state(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_display_error_state *error;
 	int transcoders[] = {
 		TRANSCODER_A,
@@ -16216,14 +16452,14 @@ intel_display_capture_error_state(struct drm_device *dev)
 	};
 	int i;
 
-	if (INTEL_INFO(dev)->num_pipes == 0)
+	if (INTEL_INFO(dev_priv)->num_pipes == 0)
 		return NULL;
 
 	error = kzalloc(sizeof(*error), GFP_ATOMIC);
 	if (error == NULL)
 		return NULL;
 
-	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
 
 	for_each_pipe(dev_priv, i) {
@@ -16239,25 +16475,25 @@ intel_display_capture_error_state(struct drm_device *dev)
 
 		error->plane[i].control = I915_READ(DSPCNTR(i));
 		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
-		if (INTEL_INFO(dev)->gen <= 3) {
+		if (INTEL_GEN(dev_priv) <= 3) {
 			error->plane[i].size = I915_READ(DSPSIZE(i));
 			error->plane[i].pos = I915_READ(DSPPOS(i));
 		}
-		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
+		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
 			error->plane[i].addr = I915_READ(DSPADDR(i));
-		if (INTEL_INFO(dev)->gen >= 4) {
+		if (INTEL_GEN(dev_priv) >= 4) {
 			error->plane[i].surface = I915_READ(DSPSURF(i));
 			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
 		}
 
 		error->pipe[i].source = I915_READ(PIPESRC(i));
 
-		if (HAS_GMCH_DISPLAY(dev))
+		if (HAS_GMCH_DISPLAY(dev_priv))
 			error->pipe[i].stat = I915_READ(PIPESTAT(i));
 	}
 
 	/* Note: this does not include DSI transcoders. */
-	error->num_transcoders = INTEL_INFO(dev)->num_pipes;
+	error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
 	if (HAS_DDI(dev_priv))
 		error->num_transcoders++; /* Account for eDP. */
 
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index f192f58708c2..ffa43eca14d3 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -131,11 +131,6 @@ static void vlv_steal_power_sequencer(struct drm_device *dev,
131 enum pipe pipe); 131 enum pipe pipe);
132static void intel_dp_unset_edid(struct intel_dp *intel_dp); 132static void intel_dp_unset_edid(struct intel_dp *intel_dp);
133 133
134static unsigned int intel_dp_unused_lane_mask(int lane_count)
135{
136 return ~((1 << lane_count) - 1) & 0xf;
137}
138
139static int 134static int
140intel_dp_max_link_bw(struct intel_dp *intel_dp) 135intel_dp_max_link_bw(struct intel_dp *intel_dp)
141{ 136{
@@ -775,6 +770,7 @@ static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
775 DP_AUX_CH_CTL_TIME_OUT_1600us | 770 DP_AUX_CH_CTL_TIME_OUT_1600us |
776 DP_AUX_CH_CTL_RECEIVE_ERROR | 771 DP_AUX_CH_CTL_RECEIVE_ERROR |
777 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | 772 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
773 DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
778 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32); 774 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
779} 775}
780 776
@@ -1181,7 +1177,6 @@ static void intel_aux_reg_init(struct intel_dp *intel_dp)
1181static void 1177static void
1182intel_dp_aux_fini(struct intel_dp *intel_dp) 1178intel_dp_aux_fini(struct intel_dp *intel_dp)
1183{ 1179{
1184 drm_dp_aux_unregister(&intel_dp->aux);
1185 kfree(intel_dp->aux.name); 1180 kfree(intel_dp->aux.name);
1186} 1181}
1187 1182
@@ -1216,15 +1211,6 @@ intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1216 return 0; 1211 return 0;
1217} 1212}
1218 1213
1219static void
1220intel_dp_connector_unregister(struct intel_connector *intel_connector)
1221{
1222 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1223
1224 intel_dp_aux_fini(intel_dp);
1225 intel_connector_unregister(intel_connector);
1226}
1227
1228static int 1214static int
1229intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates) 1215intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1230{ 1216{
@@ -1582,6 +1568,27 @@ found:
1582 &pipe_config->dp_m2_n2); 1568 &pipe_config->dp_m2_n2);
1583 } 1569 }
1584 1570
1571 /*
1572 * DPLL0 VCO may need to be adjusted to get the correct
1573 * clock for eDP. This will affect cdclk as well.
1574 */
1575 if (is_edp(intel_dp) &&
1576 (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))) {
1577 int vco;
1578
1579 switch (pipe_config->port_clock / 2) {
1580 case 108000:
1581 case 216000:
1582 vco = 8640000;
1583 break;
1584 default:
1585 vco = 8100000;
1586 break;
1587 }
1588
1589 to_intel_atomic_state(pipe_config->base.state)->cdclk_pll_vco = vco;
1590 }
1591
1585 if (!HAS_DDI(dev)) 1592 if (!HAS_DDI(dev))
1586 intel_dp_set_clock(encoder, pipe_config); 1593 intel_dp_set_clock(encoder, pipe_config);
1587 1594
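(Editor's illustration, not part of the patch: the VCO switch above keys off half the port clock in kHz, so only the 2.16 GHz and 4.32 GHz eDP link rates pick the 8640 MHz VCO and everything else falls back to 8100 MHz.)

static int edp_dpll0_vco(int port_clock)
{
	switch (port_clock / 2) {
	case 108000:	/* port_clock 216000, a 2.16 GHz link */
	case 216000:	/* port_clock 432000, a 4.32 GHz link */
		return 8640000;
	default:
		return 8100000;
	}
}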
@@ -2460,50 +2467,6 @@ static void vlv_post_disable_dp(struct intel_encoder *encoder)
2460 intel_dp_link_down(intel_dp); 2467 intel_dp_link_down(intel_dp);
2461} 2468}
2462 2469
2463static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
2464 bool reset)
2465{
2466 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2467 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
2468 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2469 enum pipe pipe = crtc->pipe;
2470 uint32_t val;
2471
2472 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2473 if (reset)
2474 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2475 else
2476 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2477 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2478
2479 if (crtc->config->lane_count > 2) {
2480 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2481 if (reset)
2482 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2483 else
2484 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2485 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2486 }
2487
2488 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2489 val |= CHV_PCS_REQ_SOFTRESET_EN;
2490 if (reset)
2491 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2492 else
2493 val |= DPIO_PCS_CLK_SOFT_RESET;
2494 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2495
2496 if (crtc->config->lane_count > 2) {
2497 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2498 val |= CHV_PCS_REQ_SOFTRESET_EN;
2499 if (reset)
2500 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2501 else
2502 val |= DPIO_PCS_CLK_SOFT_RESET;
2503 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2504 }
2505}
2506
2507static void chv_post_disable_dp(struct intel_encoder *encoder) 2470static void chv_post_disable_dp(struct intel_encoder *encoder)
2508{ 2471{
2509 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2472 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
@@ -2811,266 +2774,38 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2811 2774
2812static void vlv_pre_enable_dp(struct intel_encoder *encoder) 2775static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2813{ 2776{
2814 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2777 vlv_phy_pre_encoder_enable(encoder);
2815 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2816 struct drm_device *dev = encoder->base.dev;
2817 struct drm_i915_private *dev_priv = dev->dev_private;
2818 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2819 enum dpio_channel port = vlv_dport_to_channel(dport);
2820 int pipe = intel_crtc->pipe;
2821 u32 val;
2822
2823 mutex_lock(&dev_priv->sb_lock);
2824
2825 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2826 val = 0;
2827 if (pipe)
2828 val |= (1<<21);
2829 else
2830 val &= ~(1<<21);
2831 val |= 0x001000c4;
2832 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2833 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2834 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2835
2836 mutex_unlock(&dev_priv->sb_lock);
2837 2778
2838 intel_enable_dp(encoder); 2779 intel_enable_dp(encoder);
2839} 2780}
2840 2781
2841static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder) 2782static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2842{ 2783{
2843 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2844 struct drm_device *dev = encoder->base.dev;
2845 struct drm_i915_private *dev_priv = dev->dev_private;
2846 struct intel_crtc *intel_crtc =
2847 to_intel_crtc(encoder->base.crtc);
2848 enum dpio_channel port = vlv_dport_to_channel(dport);
2849 int pipe = intel_crtc->pipe;
2850
2851 intel_dp_prepare(encoder); 2784 intel_dp_prepare(encoder);
2852 2785
2853 /* Program Tx lane resets to default */ 2786 vlv_phy_pre_pll_enable(encoder);
2854 mutex_lock(&dev_priv->sb_lock);
2855 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2856 DPIO_PCS_TX_LANE2_RESET |
2857 DPIO_PCS_TX_LANE1_RESET);
2858 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2859 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2860 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2861 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2862 DPIO_PCS_CLK_SOFT_RESET);
2863
2864 /* Fix up inter-pair skew failure */
2865 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2866 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2867 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2868 mutex_unlock(&dev_priv->sb_lock);
2869} 2787}
2870 2788
2871static void chv_pre_enable_dp(struct intel_encoder *encoder) 2789static void chv_pre_enable_dp(struct intel_encoder *encoder)
2872{ 2790{
2873 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2791 chv_phy_pre_encoder_enable(encoder);
2874 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2875 struct drm_device *dev = encoder->base.dev;
2876 struct drm_i915_private *dev_priv = dev->dev_private;
2877 struct intel_crtc *intel_crtc =
2878 to_intel_crtc(encoder->base.crtc);
2879 enum dpio_channel ch = vlv_dport_to_channel(dport);
2880 int pipe = intel_crtc->pipe;
2881 int data, i, stagger;
2882 u32 val;
2883
2884 mutex_lock(&dev_priv->sb_lock);
2885
2886 /* allow hardware to manage TX FIFO reset source */
2887 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2888 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2889 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2890
2891 if (intel_crtc->config->lane_count > 2) {
2892 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2893 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2894 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2895 }
2896
2897 /* Program Tx lane latency optimal setting*/
2898 for (i = 0; i < intel_crtc->config->lane_count; i++) {
2899 /* Set the upar bit */
2900 if (intel_crtc->config->lane_count == 1)
2901 data = 0x0;
2902 else
2903 data = (i == 1) ? 0x0 : 0x1;
2904 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2905 data << DPIO_UPAR_SHIFT);
2906 }
2907
2908 /* Data lane stagger programming */
2909 if (intel_crtc->config->port_clock > 270000)
2910 stagger = 0x18;
2911 else if (intel_crtc->config->port_clock > 135000)
2912 stagger = 0xd;
2913 else if (intel_crtc->config->port_clock > 67500)
2914 stagger = 0x7;
2915 else if (intel_crtc->config->port_clock > 33750)
2916 stagger = 0x4;
2917 else
2918 stagger = 0x2;
2919
2920 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2921 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2922 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2923
2924 if (intel_crtc->config->lane_count > 2) {
2925 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2926 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2927 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2928 }
2929
2930 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2931 DPIO_LANESTAGGER_STRAP(stagger) |
2932 DPIO_LANESTAGGER_STRAP_OVRD |
2933 DPIO_TX1_STAGGER_MASK(0x1f) |
2934 DPIO_TX1_STAGGER_MULT(6) |
2935 DPIO_TX2_STAGGER_MULT(0));
2936
2937 if (intel_crtc->config->lane_count > 2) {
2938 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2939 DPIO_LANESTAGGER_STRAP(stagger) |
2940 DPIO_LANESTAGGER_STRAP_OVRD |
2941 DPIO_TX1_STAGGER_MASK(0x1f) |
2942 DPIO_TX1_STAGGER_MULT(7) |
2943 DPIO_TX2_STAGGER_MULT(5));
2944 }
2945
2946 /* Deassert data lane reset */
2947 chv_data_lane_soft_reset(encoder, false);
2948
2949 mutex_unlock(&dev_priv->sb_lock);
2950 2792
2951 intel_enable_dp(encoder); 2793 intel_enable_dp(encoder);
2952 2794
2953 /* Second common lane will stay alive on its own now */ 2795 /* Second common lane will stay alive on its own now */
2954 if (dport->release_cl2_override) { 2796 chv_phy_release_cl2_override(encoder);
2955 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
2956 dport->release_cl2_override = false;
2957 }
2958} 2797}
2959 2798
2960static void chv_dp_pre_pll_enable(struct intel_encoder *encoder) 2799static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2961{ 2800{
2962 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2963 struct drm_device *dev = encoder->base.dev;
2964 struct drm_i915_private *dev_priv = dev->dev_private;
2965 struct intel_crtc *intel_crtc =
2966 to_intel_crtc(encoder->base.crtc);
2967 enum dpio_channel ch = vlv_dport_to_channel(dport);
2968 enum pipe pipe = intel_crtc->pipe;
2969 unsigned int lane_mask =
2970 intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
2971 u32 val;
2972
2973 intel_dp_prepare(encoder); 2801 intel_dp_prepare(encoder);
2974 2802
2975 /* 2803 chv_phy_pre_pll_enable(encoder);
2976 * Must trick the second common lane into life.
2977 * Otherwise we can't even access the PLL.
2978 */
2979 if (ch == DPIO_CH0 && pipe == PIPE_B)
2980 dport->release_cl2_override =
2981 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
2982
2983 chv_phy_powergate_lanes(encoder, true, lane_mask);
2984
2985 mutex_lock(&dev_priv->sb_lock);
2986
2987 /* Assert data lane reset */
2988 chv_data_lane_soft_reset(encoder, true);
2989
2990 /* program left/right clock distribution */
2991 if (pipe != PIPE_B) {
2992 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2993 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2994 if (ch == DPIO_CH0)
2995 val |= CHV_BUFLEFTENA1_FORCE;
2996 if (ch == DPIO_CH1)
2997 val |= CHV_BUFRIGHTENA1_FORCE;
2998 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2999 } else {
3000 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3001 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3002 if (ch == DPIO_CH0)
3003 val |= CHV_BUFLEFTENA2_FORCE;
3004 if (ch == DPIO_CH1)
3005 val |= CHV_BUFRIGHTENA2_FORCE;
3006 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3007 }
3008
3009 /* program clock channel usage */
3010 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
3011 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3012 if (pipe != PIPE_B)
3013 val &= ~CHV_PCS_USEDCLKCHANNEL;
3014 else
3015 val |= CHV_PCS_USEDCLKCHANNEL;
3016 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
3017
3018 if (intel_crtc->config->lane_count > 2) {
3019 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
3020 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3021 if (pipe != PIPE_B)
3022 val &= ~CHV_PCS_USEDCLKCHANNEL;
3023 else
3024 val |= CHV_PCS_USEDCLKCHANNEL;
3025 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
3026 }
3027
3028 /*
3029 * This is a bit weird since generally CL
3030 * matches the pipe, but here we need to
3031 * pick the CL based on the port.
3032 */
3033 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
3034 if (pipe != PIPE_B)
3035 val &= ~CHV_CMN_USEDCLKCHANNEL;
3036 else
3037 val |= CHV_CMN_USEDCLKCHANNEL;
3038 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
3039
3040 mutex_unlock(&dev_priv->sb_lock);
3041} 2804}
3042 2805
3043static void chv_dp_post_pll_disable(struct intel_encoder *encoder) 2806static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
3044{ 2807{
3045 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2808 chv_phy_post_pll_disable(encoder);
3046 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
3047 u32 val;
3048
3049 mutex_lock(&dev_priv->sb_lock);
3050
3051 /* disable left/right clock distribution */
3052 if (pipe != PIPE_B) {
3053 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3054 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3055 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3056 } else {
3057 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3058 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3059 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3060 }
3061
3062 mutex_unlock(&dev_priv->sb_lock);
3063
3064 /*
3065 * Leave the power down bit cleared for at least one
3066 * lane so that chv_powergate_phy_ch() will power
3067 * on something when the channel is otherwise unused.
3068 * When the port is off and the override is removed
3069 * the lanes power down anyway, so otherwise it doesn't
3070 * really matter what the state of power down bits is
3071 * after this.
3072 */
3073 chv_phy_powergate_lanes(encoder, false, 0x0);
3074} 2809}
3075 2810
3076/* 2811/*
@@ -3178,16 +2913,10 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3178 2913
3179static uint32_t vlv_signal_levels(struct intel_dp *intel_dp) 2914static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3180{ 2915{
3181 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2916 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3182 struct drm_i915_private *dev_priv = dev->dev_private;
3183 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3184 struct intel_crtc *intel_crtc =
3185 to_intel_crtc(dport->base.base.crtc);
3186 unsigned long demph_reg_value, preemph_reg_value, 2917 unsigned long demph_reg_value, preemph_reg_value,
3187 uniqtranscale_reg_value; 2918 uniqtranscale_reg_value;
3188 uint8_t train_set = intel_dp->train_set[0]; 2919 uint8_t train_set = intel_dp->train_set[0];
3189 enum dpio_channel port = vlv_dport_to_channel(dport);
3190 int pipe = intel_crtc->pipe;
3191 2920
3192 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 2921 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3193 case DP_TRAIN_PRE_EMPH_LEVEL_0: 2922 case DP_TRAIN_PRE_EMPH_LEVEL_0:
@@ -3262,37 +2991,18 @@ static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3262 return 0; 2991 return 0;
3263 } 2992 }
3264 2993
3265 mutex_lock(&dev_priv->sb_lock); 2994 vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
3266 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000); 2995 uniqtranscale_reg_value, 0);
3267 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3268 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3269 uniqtranscale_reg_value);
3270 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3271 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3272 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3273 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3274 mutex_unlock(&dev_priv->sb_lock);
3275 2996
3276 return 0; 2997 return 0;
3277} 2998}
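(Editor's sketch: both vlv_signal_levels() and chv_signal_levels() decode intel_dp->train_set[0], which packs the DPCD TRAINING_LANEx_SET voltage-swing and pre-emphasis requests; the masks below are the standard ones from drm_dp_helper.h.)

#include <drm/drm_dp_helper.h>

static void decode_train_set(uint8_t train_set, int *vswing, int *preemph)
{
	/* voltage swing level 0-3, bits 1:0 */
	*vswing = train_set & DP_TRAIN_VOLTAGE_SWING_MASK;
	/* pre-emphasis level 0-3, bits 4:3 */
	*preemph = (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
		   DP_TRAIN_PRE_EMPHASIS_SHIFT;
}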
3278 2999
3279static bool chv_need_uniq_trans_scale(uint8_t train_set)
3280{
3281 return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3282 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3283}
3284
3285static uint32_t chv_signal_levels(struct intel_dp *intel_dp) 3000static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3286{ 3001{
3287 struct drm_device *dev = intel_dp_to_dev(intel_dp); 3002 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3288 struct drm_i915_private *dev_priv = dev->dev_private; 3003 u32 deemph_reg_value, margin_reg_value;
3289 struct intel_digital_port *dport = dp_to_dig_port(intel_dp); 3004 bool uniq_trans_scale = false;
3290 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3291 u32 deemph_reg_value, margin_reg_value, val;
3292 uint8_t train_set = intel_dp->train_set[0]; 3005 uint8_t train_set = intel_dp->train_set[0];
3293 enum dpio_channel ch = vlv_dport_to_channel(dport);
3294 enum pipe pipe = intel_crtc->pipe;
3295 int i;
3296 3006
3297 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 3007 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3298 case DP_TRAIN_PRE_EMPH_LEVEL_0: 3008 case DP_TRAIN_PRE_EMPH_LEVEL_0:
@@ -3312,7 +3022,7 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3312 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 3022 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3313 deemph_reg_value = 128; 3023 deemph_reg_value = 128;
3314 margin_reg_value = 154; 3024 margin_reg_value = 154;
3315 /* FIXME extra to set for 1200 */ 3025 uniq_trans_scale = true;
3316 break; 3026 break;
3317 default: 3027 default:
3318 return 0; 3028 return 0;
@@ -3364,88 +3074,8 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3364 return 0; 3074 return 0;
3365 } 3075 }
3366 3076
3367 mutex_lock(&dev_priv->sb_lock); 3077 chv_set_phy_signal_level(encoder, deemph_reg_value,
3368 3078 margin_reg_value, uniq_trans_scale);
3369 /* Clear calc init */
3370 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3371 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3372 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3373 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3374 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3375
3376 if (intel_crtc->config->lane_count > 2) {
3377 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3378 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3379 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3380 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3381 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3382 }
3383
3384 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3385 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3386 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3387 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3388
3389 if (intel_crtc->config->lane_count > 2) {
3390 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3391 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3392 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3393 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3394 }
3395
3396 /* Program swing deemph */
3397 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3398 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3399 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3400 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3401 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3402 }
3403
3404 /* Program swing margin */
3405 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3406 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3407
3408 val &= ~DPIO_SWING_MARGIN000_MASK;
3409 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3410
3411 /*
3412 * Supposedly this value shouldn't matter when unique transition
3413 * scale is disabled, but in fact it does matter. Let's just
3414 * always program the same value and hope it's OK.
3415 */
3416 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3417 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3418
3419 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3420 }
3421
3422 /*
3423 * The document said it needs to set bit 27 for ch0 and bit 26
3424 * for ch1. Might be a typo in the doc.
3425 * For now, for this unique transition scale selection, set bit
3426 * 27 for ch0 and ch1.
3427 */
3428 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3429 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3430 if (chv_need_uniq_trans_scale(train_set))
3431 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3432 else
3433 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3434 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3435 }
3436
3437 /* Start swing calculation */
3438 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3439 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3440 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3441
3442 if (intel_crtc->config->lane_count > 2) {
3443 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3444 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3445 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3446 }
3447
3448 mutex_unlock(&dev_priv->sb_lock);
3449 3079
3450 return 0; 3080 return 0;
3451} 3081}
@@ -3714,7 +3344,6 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
3714 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3344 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3715 struct drm_device *dev = dig_port->base.base.dev; 3345 struct drm_device *dev = dig_port->base.base.dev;
3716 struct drm_i915_private *dev_priv = dev->dev_private; 3346 struct drm_i915_private *dev_priv = dev->dev_private;
3717 uint8_t rev;
3718 3347
3719 if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd, 3348 if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
3720 sizeof(intel_dp->dpcd)) < 0) 3349 sizeof(intel_dp->dpcd)) < 0)
@@ -3771,6 +3400,15 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
3771 DRM_DEBUG_KMS("PSR2 %s on sink", 3400 DRM_DEBUG_KMS("PSR2 %s on sink",
3772 dev_priv->psr.psr2_support ? "supported" : "not supported"); 3401 dev_priv->psr.psr2_support ? "supported" : "not supported");
3773 } 3402 }
3403
3404 /* Read the eDP Display control capabilities registers */
3405 memset(intel_dp->edp_dpcd, 0, sizeof(intel_dp->edp_dpcd));
3406 if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3407 (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
3408 intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
3409 sizeof(intel_dp->edp_dpcd)))
3410 DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
3411 intel_dp->edp_dpcd);
3774 } 3412 }
3775 3413
3776 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n", 3414 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
@@ -3778,10 +3416,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
3778 yesno(drm_dp_tps3_supported(intel_dp->dpcd))); 3416 yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
3779 3417
3780 /* Intermediate frequency support */ 3418 /* Intermediate frequency support */
3781 if (is_edp(intel_dp) && 3419 if (is_edp(intel_dp) && (intel_dp->edp_dpcd[0] >= 0x03)) { /* eDP v1.4 or higher */
3782 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3783 (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3784 (rev >= 0x03)) { /* eDp v1.4 or higher */
3785 __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; 3420 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
3786 int i; 3421 int i;
3787 3422
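(Editor's note: edp_dpcd[0] caches DP_EDP_DPCD_REV, so the bare 0x03 compared above is the eDP 1.4 revision code, which drm_dp_helper.h names:)

static bool edp_is_v14_or_later(const uint8_t *edp_dpcd)
{
	return edp_dpcd[0] >= DP_EDP_14;	/* DP_EDP_14 == 0x03 */
}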
@@ -4812,6 +4447,13 @@ done:
4812} 4447}
4813 4448
4814static void 4449static void
4450intel_dp_connector_unregister(struct drm_connector *connector)
4451{
4452 drm_dp_aux_unregister(&intel_attached_dp(connector)->aux);
4453 intel_connector_unregister(connector);
4454}
4455
4456static void
4815intel_dp_connector_destroy(struct drm_connector *connector) 4457intel_dp_connector_destroy(struct drm_connector *connector)
4816{ 4458{
4817 struct intel_connector *intel_connector = to_intel_connector(connector); 4459 struct intel_connector *intel_connector = to_intel_connector(connector);
@@ -4851,6 +4493,9 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4851 intel_dp->edp_notifier.notifier_call = NULL; 4493 intel_dp->edp_notifier.notifier_call = NULL;
4852 } 4494 }
4853 } 4495 }
4496
4497 intel_dp_aux_fini(intel_dp);
4498
4854 drm_encoder_cleanup(encoder); 4499 drm_encoder_cleanup(encoder);
4855 kfree(intel_dig_port); 4500 kfree(intel_dig_port);
4856} 4501}
@@ -4927,6 +4572,7 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
4927 .fill_modes = drm_helper_probe_single_connector_modes, 4572 .fill_modes = drm_helper_probe_single_connector_modes,
4928 .set_property = intel_dp_set_property, 4573 .set_property = intel_dp_set_property,
4929 .atomic_get_property = intel_connector_atomic_get_property, 4574 .atomic_get_property = intel_connector_atomic_get_property,
4575 .early_unregister = intel_dp_connector_unregister,
4930 .destroy = intel_dp_connector_destroy, 4576 .destroy = intel_dp_connector_destroy,
4931 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 4577 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4932 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 4578 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -4935,7 +4581,6 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
4935static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { 4581static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4936 .get_modes = intel_dp_get_modes, 4582 .get_modes = intel_dp_get_modes,
4937 .mode_valid = intel_dp_mode_valid, 4583 .mode_valid = intel_dp_mode_valid,
4938 .best_encoder = intel_best_encoder,
4939}; 4584};
4940 4585
4941static const struct drm_encoder_funcs intel_dp_enc_funcs = { 4586static const struct drm_encoder_funcs intel_dp_enc_funcs = {
@@ -4977,9 +4622,6 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4977 intel_display_power_get(dev_priv, power_domain); 4622 intel_display_power_get(dev_priv, power_domain);
4978 4623
4979 if (long_hpd) { 4624 if (long_hpd) {
4980 /* indicate that we need to restart link training */
4981 intel_dp->train_set_valid = false;
4982
4983 intel_dp_long_pulse(intel_dp->attached_connector); 4625 intel_dp_long_pulse(intel_dp->attached_connector);
4984 if (intel_dp->is_mst) 4626 if (intel_dp->is_mst)
4985 ret = IRQ_HANDLED; 4627 ret = IRQ_HANDLED;
@@ -5590,14 +5232,14 @@ void intel_edp_drrs_flush(struct drm_device *dev,
5590 * 5232 *
5591 * DRRS saves power by switching to low RR based on usage scenarios. 5233 * DRRS saves power by switching to low RR based on usage scenarios.
5592 * 5234 *
5593 * eDP DRRS:- 5235 * The implementation is based on frontbuffer tracking implementation. When
5594 * The implementation is based on frontbuffer tracking implementation. 5236 * there is a disturbance on the screen triggered by user activity or a periodic
5595 * When there is a disturbance on the screen triggered by user activity or a 5237 * system activity, DRRS is disabled (RR is changed to high RR). When there is
5596 * periodic system activity, DRRS is disabled (RR is changed to high RR). 5238 * no movement on screen, after a timeout of 1 second, a switch to low RR is
5597 * When there is no movement on screen, after a timeout of 1 second, a switch 5239 * made.
5598 * to low RR is made. 5240 *
5599 * For integration with frontbuffer tracking code, 5241 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
5600 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called. 5242 * and intel_edp_drrs_flush() are called.
5601 * 5243 *
5602 * DRRS can be further extended to support other internal panels and also 5244 * DRRS can be further extended to support other internal panels and also
5603 * the scenario of video playback wherein RR is set based on the rate 5245 * the scenario of video playback wherein RR is set based on the rate
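(A minimal sketch of that integration; editor's illustration with hypothetical call sites around the two exported functions:)

/* A CPU/GTT write to a frontbuffer: switch to high RR immediately. */
static void example_frontbuffer_dirty(struct drm_device *dev,
				      unsigned frontbuffer_bits)
{
	intel_edp_drrs_invalidate(dev, frontbuffer_bits);
}

/* Rendering has landed on screen: re-arm the 1 second low-RR timer. */
static void example_frontbuffer_flush(struct drm_device *dev,
				      unsigned frontbuffer_bits)
{
	intel_edp_drrs_flush(dev, frontbuffer_bits);
}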
@@ -5725,8 +5367,11 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5725 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) { 5367 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5726 fixed_mode = drm_mode_duplicate(dev, 5368 fixed_mode = drm_mode_duplicate(dev,
5727 dev_priv->vbt.lfp_lvds_vbt_mode); 5369 dev_priv->vbt.lfp_lvds_vbt_mode);
5728 if (fixed_mode) 5370 if (fixed_mode) {
5729 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; 5371 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5372 connector->display_info.width_mm = fixed_mode->width_mm;
5373 connector->display_info.height_mm = fixed_mode->height_mm;
5374 }
5730 } 5375 }
5731 mutex_unlock(&dev->mode_config.mutex); 5376 mutex_unlock(&dev->mode_config.mutex);
5732 5377
@@ -5840,7 +5485,6 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5840 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; 5485 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5841 else 5486 else
5842 intel_connector->get_hw_state = intel_connector_get_hw_state; 5487 intel_connector->get_hw_state = intel_connector_get_hw_state;
5843 intel_connector->unregister = intel_dp_connector_unregister;
5844 5488
5845 /* Set up the hotplug pin. */ 5489 /* Set up the hotplug pin. */
5846 switch (port) { 5490 switch (port) {
@@ -5923,9 +5567,9 @@ fail:
5923 return false; 5567 return false;
5924} 5568}
5925 5569
5926void 5570bool intel_dp_init(struct drm_device *dev,
5927intel_dp_init(struct drm_device *dev, 5571 i915_reg_t output_reg,
5928 i915_reg_t output_reg, enum port port) 5572 enum port port)
5929{ 5573{
5930 struct drm_i915_private *dev_priv = dev->dev_private; 5574 struct drm_i915_private *dev_priv = dev->dev_private;
5931 struct intel_digital_port *intel_dig_port; 5575 struct intel_digital_port *intel_dig_port;
@@ -5935,7 +5579,7 @@ intel_dp_init(struct drm_device *dev,
5935 5579
5936 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL); 5580 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
5937 if (!intel_dig_port) 5581 if (!intel_dig_port)
5938 return; 5582 return false;
5939 5583
5940 intel_connector = intel_connector_alloc(); 5584 intel_connector = intel_connector_alloc();
5941 if (!intel_connector) 5585 if (!intel_connector)
@@ -5945,7 +5589,7 @@ intel_dp_init(struct drm_device *dev,
5945 encoder = &intel_encoder->base; 5589 encoder = &intel_encoder->base;
5946 5590
5947 if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, 5591 if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5948 DRM_MODE_ENCODER_TMDS, NULL)) 5592 DRM_MODE_ENCODER_TMDS, "DP %c", port_name(port)))
5949 goto err_encoder_init; 5593 goto err_encoder_init;
5950 5594
5951 intel_encoder->compute_config = intel_dp_compute_config; 5595 intel_encoder->compute_config = intel_dp_compute_config;
@@ -5992,7 +5636,7 @@ intel_dp_init(struct drm_device *dev,
5992 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) 5636 if (!intel_dp_init_connector(intel_dig_port, intel_connector))
5993 goto err_init_connector; 5637 goto err_init_connector;
5994 5638
5995 return; 5639 return true;
5996 5640
5997err_init_connector: 5641err_init_connector:
5998 drm_encoder_cleanup(encoder); 5642 drm_encoder_cleanup(encoder);
@@ -6000,8 +5644,7 @@ err_encoder_init:
6000 kfree(intel_connector); 5644 kfree(intel_connector);
6001err_connector_alloc: 5645err_connector_alloc:
6002 kfree(intel_dig_port); 5646 kfree(intel_dig_port);
6003 5647 return false;
6004 return;
6005} 5648}
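(Editor's sketch of what the new bool return allows; the caller below is hypothetical, with DP_B and PORT_B as the usual i915 identifiers:)

static void example_register_port_b(struct drm_device *dev)
{
	if (!intel_dp_init(dev, DP_B, PORT_B))
		DRM_DEBUG_KMS("DP on port B failed to initialize\n");
}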
6006 5649
6007void intel_dp_mst_suspend(struct drm_device *dev) 5650void intel_dp_mst_suspend(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
new file mode 100644
index 000000000000..6532e226db29
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
@@ -0,0 +1,172 @@
1/*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 */
24
25#include "intel_drv.h"
26
27static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
28{
29 uint8_t reg_val = 0;
30
31 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
32 &reg_val) < 0) {
33 DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
34 DP_EDP_DISPLAY_CONTROL_REGISTER);
35 return;
36 }
37 if (enable)
38 reg_val |= DP_EDP_BACKLIGHT_ENABLE;
39 else
40 reg_val &= ~(DP_EDP_BACKLIGHT_ENABLE);
41
42 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
43 reg_val) != 1) {
44 DRM_DEBUG_KMS("Failed to %s aux backlight\n",
45 enable ? "enable" : "disable");
46 }
47}
48
49/*
50 * Read the current backlight value from DPCD register(s) based
51 * on whether 8-bit (MSB) or 16-bit (MSB and LSB) values are supported
52 */
53static uint32_t intel_dp_aux_get_backlight(struct intel_connector *connector)
54{
55 struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
56 uint8_t read_val[2] = { 0x0 };
57 uint16_t level = 0;
58
59 if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
60 &read_val, sizeof(read_val)) < 0) {
61 DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
62 DP_EDP_BACKLIGHT_BRIGHTNESS_MSB);
63 return 0;
64 }
65 level = read_val[0];
66 if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
67 level = (read_val[0] << 8 | read_val[1]);
68
69 return level;
70}
71
72/*
73 * Sends the current backlight level over the aux channel, checking if it's using
74 * an 8-bit or a 16-bit value (MSB and LSB)
75 */
76static void
77intel_dp_aux_set_backlight(struct intel_connector *connector, u32 level)
78{
79 struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
80 uint8_t vals[2] = { 0x0 };
81
82 vals[0] = level;
83
84 /* Write the MSB and/or LSB */
85 if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) {
86 vals[0] = (level & 0xFF00) >> 8;
87 vals[1] = (level & 0xFF);
88 }
89 if (drm_dp_dpcd_write(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
90 vals, sizeof(vals)) < 0) {
91 DRM_DEBUG_KMS("Failed to write aux backlight level\n");
92 return;
93 }
94}
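(Editor's worked example of the MSB/LSB handling above: level 0xABCD is written as vals[0] = 0xAB and vals[1] = 0xCD, and read back as (0xAB << 8) | 0xCD == 0xABCD; an 8-bit panel only ever uses the MSB byte.)

static uint16_t example_level_roundtrip(uint16_t level)
{
	uint8_t vals[2];

	vals[0] = (level & 0xFF00) >> 8;	/* MSB, e.g. 0xAB */
	vals[1] = level & 0xFF;			/* LSB, e.g. 0xCD */
	return (vals[0] << 8) | vals[1];	/* 0xABCD again */
}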
95
96static void intel_dp_aux_enable_backlight(struct intel_connector *connector)
97{
98 struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
99 uint8_t dpcd_buf = 0;
100
101 set_aux_backlight_enable(intel_dp, true);
102
103 if ((drm_dp_dpcd_readb(&intel_dp->aux,
104 DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) == 1) &&
105 ((dpcd_buf & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK) ==
106 DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET))
107 drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
108 (dpcd_buf | DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD));
109}
110
111static void intel_dp_aux_disable_backlight(struct intel_connector *connector)
112{
113 set_aux_backlight_enable(enc_to_intel_dp(&connector->encoder->base), false);
114}
115
116static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
117 enum pipe pipe)
118{
119 struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
120 struct intel_panel *panel = &connector->panel;
121
122 intel_dp_aux_enable_backlight(connector);
123
124 if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
125 panel->backlight.max = 0xFFFF;
126 else
127 panel->backlight.max = 0xFF;
128
129 panel->backlight.min = 0;
130 panel->backlight.level = intel_dp_aux_get_backlight(connector);
131
132 panel->backlight.enabled = panel->backlight.level != 0;
133
134 return 0;
135}
136
137static bool
138intel_dp_aux_display_control_capable(struct intel_connector *connector)
139{
140 struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
141
142 /* Check the eDP Display control capabilities registers to determine if
143 * the panel can support backlight control over the aux channel
144 */
145 if (intel_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP &&
146 (intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) &&
147 !((intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_PIN_ENABLE_CAP) ||
148 (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP))) {
149 DRM_DEBUG_KMS("AUX Backlight Control Supported!\n");
150 return true;
151 }
152 return false;
153}
154
155int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
156{
157 struct intel_panel *panel = &intel_connector->panel;
158
159 if (!i915.enable_dpcd_backlight)
160 return -ENODEV;
161
162 if (!intel_dp_aux_display_control_capable(intel_connector))
163 return -ENODEV;
164
165 panel->backlight.setup = intel_dp_aux_setup_backlight;
166 panel->backlight.enable = intel_dp_aux_enable_backlight;
167 panel->backlight.disable = intel_dp_aux_disable_backlight;
168 panel->backlight.set = intel_dp_aux_set_backlight;
169 panel->backlight.get = intel_dp_aux_get_backlight;
170
171 return 0;
172}
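(Editor's sketch of a caller; the helper is hypothetical and the fallback comment stands in for the existing PWM backlight path:)

static void example_setup_backlight(struct intel_connector *connector)
{
	/* AUX control wins when the module parameter is set and the
	 * panel advertises the DPCD capability bits checked above;
	 * -ENODEV means keep the default PWM-based hooks. */
	if (intel_dp_aux_init_backlight_funcs(connector) == 0)
		return;
	/* ... fall back to the platform PWM backlight setup ... */
}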
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
index 0b8eefc2acc5..60fb39cd220b 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
@@ -85,8 +85,7 @@ static bool
85intel_dp_reset_link_train(struct intel_dp *intel_dp, 85intel_dp_reset_link_train(struct intel_dp *intel_dp,
86 uint8_t dp_train_pat) 86 uint8_t dp_train_pat)
87{ 87{
88 if (!intel_dp->train_set_valid) 88 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
89 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
90 intel_dp_set_signal_levels(intel_dp); 89 intel_dp_set_signal_levels(intel_dp);
91 return intel_dp_set_link_train(intel_dp, dp_train_pat); 90 return intel_dp_set_link_train(intel_dp, dp_train_pat);
92} 91}
@@ -161,23 +160,6 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
161 break; 160 break;
162 } 161 }
163 162
164 /*
165 * if we used previously trained voltage and pre-emphasis values
166 * and we don't get clock recovery, reset link training values
167 */
168 if (intel_dp->train_set_valid) {
169 DRM_DEBUG_KMS("clock recovery not ok, reset");
170 /* clear the flag as we are not reusing train set */
171 intel_dp->train_set_valid = false;
172 if (!intel_dp_reset_link_train(intel_dp,
173 DP_TRAINING_PATTERN_1 |
174 DP_LINK_SCRAMBLING_DISABLE)) {
175 DRM_ERROR("failed to enable link training\n");
176 return;
177 }
178 continue;
179 }
180
181 /* Check to see if we've tried the max voltage */ 163 /* Check to see if we've tried the max voltage */
182 for (i = 0; i < intel_dp->lane_count; i++) 164 for (i = 0; i < intel_dp->lane_count; i++)
183 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) 165 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
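(Editor's sketch of the test that loop performs: clock recovery only stops raising the voltage swing once every active lane reports max swing.)

static bool all_lanes_at_max_swing(const uint8_t *train_set, int lane_count)
{
	int i;

	for (i = 0; i < lane_count; i++)
		if (!(train_set[i] & DP_TRAIN_MAX_SWING_REACHED))
			return false;
	return true;
}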
@@ -284,7 +266,6 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
284 /* Make sure clock is still ok */ 266 /* Make sure clock is still ok */
285 if (!drm_dp_clock_recovery_ok(link_status, 267 if (!drm_dp_clock_recovery_ok(link_status,
286 intel_dp->lane_count)) { 268 intel_dp->lane_count)) {
287 intel_dp->train_set_valid = false;
288 intel_dp_link_training_clock_recovery(intel_dp); 269 intel_dp_link_training_clock_recovery(intel_dp);
289 intel_dp_set_link_train(intel_dp, 270 intel_dp_set_link_train(intel_dp,
290 training_pattern | 271 training_pattern |
@@ -301,7 +282,6 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
301 282
302 /* Try 5 times, then try clock recovery if that fails */ 283 /* Try 5 times, then try clock recovery if that fails */
303 if (tries > 5) { 284 if (tries > 5) {
304 intel_dp->train_set_valid = false;
305 intel_dp_link_training_clock_recovery(intel_dp); 285 intel_dp_link_training_clock_recovery(intel_dp);
306 intel_dp_set_link_train(intel_dp, 286 intel_dp_set_link_train(intel_dp,
307 training_pattern | 287 training_pattern |
@@ -322,10 +302,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
322 302
323 intel_dp_set_idle_link_train(intel_dp); 303 intel_dp_set_idle_link_train(intel_dp);
324 304
325 if (channel_eq) { 305 if (channel_eq)
326 intel_dp->train_set_valid = true;
327 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n"); 306 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
328 }
329} 307}
330 308
331void intel_dp_stop_link_train(struct intel_dp *intel_dp) 309void intel_dp_stop_link_train(struct intel_dp *intel_dp)
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 7a34090cef34..9646816604be 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -336,6 +336,7 @@ static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
336 .fill_modes = drm_helper_probe_single_connector_modes, 336 .fill_modes = drm_helper_probe_single_connector_modes,
337 .set_property = intel_dp_mst_set_property, 337 .set_property = intel_dp_mst_set_property,
338 .atomic_get_property = intel_connector_atomic_get_property, 338 .atomic_get_property = intel_connector_atomic_get_property,
339 .early_unregister = intel_connector_unregister,
339 .destroy = intel_dp_mst_connector_destroy, 340 .destroy = intel_dp_mst_connector_destroy,
340 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 341 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
341 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 342 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -455,7 +456,6 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
455 drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort); 456 drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort);
456 drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs); 457 drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);
457 458
458 intel_connector->unregister = intel_connector_unregister;
459 intel_connector->get_hw_state = intel_dp_mst_get_hw_state; 459 intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
460 intel_connector->mst_port = intel_dp; 460 intel_connector->mst_port = intel_dp;
461 intel_connector->port = port; 461 intel_connector->port = port;
@@ -489,7 +489,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
489 struct intel_connector *intel_connector = to_intel_connector(connector); 489 struct intel_connector *intel_connector = to_intel_connector(connector);
490 struct drm_device *dev = connector->dev; 490 struct drm_device *dev = connector->dev;
491 491
492 intel_connector->unregister(intel_connector); 492 drm_connector_unregister(connector);
493 493
494 /* need to nuke the connector */ 494 /* need to nuke the connector */
495 drm_modeset_lock_all(dev); 495 drm_modeset_lock_all(dev);
@@ -534,7 +534,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
534 intel_mst->primary = intel_dig_port; 534 intel_mst->primary = intel_dig_port;
535 535
536 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs, 536 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs,
537 DRM_MODE_ENCODER_DPMST, NULL); 537 DRM_MODE_ENCODER_DPMST, "DP-MST %c", pipe_name(pipe));
538 538
539 intel_encoder->type = INTEL_OUTPUT_DP_MST; 539 intel_encoder->type = INTEL_OUTPUT_DP_MST;
540 intel_encoder->crtc_mask = 0x7; 540 intel_encoder->crtc_mask = 0x7;
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
new file mode 100644
index 000000000000..288da35572b4
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -0,0 +1,470 @@
1/*
2 * Copyright © 2014-2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24#include "intel_drv.h"
25
26void chv_set_phy_signal_level(struct intel_encoder *encoder,
27 u32 deemph_reg_value, u32 margin_reg_value,
28 bool uniq_trans_scale)
29{
30 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
31 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
32 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
33 enum dpio_channel ch = vlv_dport_to_channel(dport);
34 enum pipe pipe = intel_crtc->pipe;
35 u32 val;
36 int i;
37
38 mutex_lock(&dev_priv->sb_lock);
39
40 /* Clear calc init */
41 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
42 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
43 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
44 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
45 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
46
47 if (intel_crtc->config->lane_count > 2) {
48 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
49 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
50 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
51 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
52 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
53 }
54
55 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
56 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
57 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
58 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
59
60 if (intel_crtc->config->lane_count > 2) {
61 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
62 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
63 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
64 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
65 }
66
67 /* Program swing deemph */
68 for (i = 0; i < intel_crtc->config->lane_count; i++) {
69 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
70 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
71 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
72 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
73 }
74
75 /* Program swing margin */
76 for (i = 0; i < intel_crtc->config->lane_count; i++) {
77 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
78
79 val &= ~DPIO_SWING_MARGIN000_MASK;
80 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
81
82 /*
83 * Supposedly this value shouldn't matter when unique transition
84 * scale is disabled, but in fact it does matter. Let's just
85 * always program the same value and hope it's OK.
86 */
87 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
88 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
89
90 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
91 }
92
93 /*
94 * The document said it needs to set bit 27 for ch0 and bit 26
95 * for ch1. Might be a typo in the doc.
96 * For now, for this unique transition scale selection, set bit
97 * 27 for ch0 and ch1.
98 */
99 for (i = 0; i < intel_crtc->config->lane_count; i++) {
100 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
101 if (uniq_trans_scale)
102 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
103 else
104 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
105 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
106 }
107
108 /* Start swing calculation */
109 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
110 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
111 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
112
113 if (intel_crtc->config->lane_count > 2) {
114 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
115 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
116 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
117 }
118
119 mutex_unlock(&dev_priv->sb_lock);
120
121}
122
123void chv_data_lane_soft_reset(struct intel_encoder *encoder,
124 bool reset)
125{
126 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
127 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
128 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
129 enum pipe pipe = crtc->pipe;
130 uint32_t val;
131
132 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
133 if (reset)
134 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
135 else
136 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
137 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
138
139 if (crtc->config->lane_count > 2) {
140 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
141 if (reset)
142 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
143 else
144 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
145 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
146 }
147
148 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
149 val |= CHV_PCS_REQ_SOFTRESET_EN;
150 if (reset)
151 val &= ~DPIO_PCS_CLK_SOFT_RESET;
152 else
153 val |= DPIO_PCS_CLK_SOFT_RESET;
154 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
155
156 if (crtc->config->lane_count > 2) {
157 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
158 val |= CHV_PCS_REQ_SOFTRESET_EN;
159 if (reset)
160 val &= ~DPIO_PCS_CLK_SOFT_RESET;
161 else
162 val |= DPIO_PCS_CLK_SOFT_RESET;
163 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
164 }
165}
166
167void chv_phy_pre_pll_enable(struct intel_encoder *encoder)
168{
169 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
170 struct drm_device *dev = encoder->base.dev;
171 struct drm_i915_private *dev_priv = dev->dev_private;
172 struct intel_crtc *intel_crtc =
173 to_intel_crtc(encoder->base.crtc);
174 enum dpio_channel ch = vlv_dport_to_channel(dport);
175 enum pipe pipe = intel_crtc->pipe;
176 unsigned int lane_mask =
177 intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
178 u32 val;
179
180 /*
181 * Must trick the second common lane into life.
182 * Otherwise we can't even access the PLL.
183 */
184 if (ch == DPIO_CH0 && pipe == PIPE_B)
185 dport->release_cl2_override =
186 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
187
188 chv_phy_powergate_lanes(encoder, true, lane_mask);
189
190 mutex_lock(&dev_priv->sb_lock);
191
192 /* Assert data lane reset */
193 chv_data_lane_soft_reset(encoder, true);
194
195 /* program left/right clock distribution */
196 if (pipe != PIPE_B) {
197 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
198 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
199 if (ch == DPIO_CH0)
200 val |= CHV_BUFLEFTENA1_FORCE;
201 if (ch == DPIO_CH1)
202 val |= CHV_BUFRIGHTENA1_FORCE;
203 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
204 } else {
205 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
206 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
207 if (ch == DPIO_CH0)
208 val |= CHV_BUFLEFTENA2_FORCE;
209 if (ch == DPIO_CH1)
210 val |= CHV_BUFRIGHTENA2_FORCE;
211 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
212 }
213
214 /* program clock channel usage */
215 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
216 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
217 if (pipe != PIPE_B)
218 val &= ~CHV_PCS_USEDCLKCHANNEL;
219 else
220 val |= CHV_PCS_USEDCLKCHANNEL;
221 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
222
223 if (intel_crtc->config->lane_count > 2) {
224 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
225 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
226 if (pipe != PIPE_B)
227 val &= ~CHV_PCS_USEDCLKCHANNEL;
228 else
229 val |= CHV_PCS_USEDCLKCHANNEL;
230 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
231 }
232
233 /*
234 * This is a bit weird since generally CL
235 * matches the pipe, but here we need to
236 * pick the CL based on the port.
237 */
238 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
239 if (pipe != PIPE_B)
240 val &= ~CHV_CMN_USEDCLKCHANNEL;
241 else
242 val |= CHV_CMN_USEDCLKCHANNEL;
243 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
244
245 mutex_unlock(&dev_priv->sb_lock);
246}
247
248void chv_phy_pre_encoder_enable(struct intel_encoder *encoder)
249{
250 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
251 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
252 struct drm_device *dev = encoder->base.dev;
253 struct drm_i915_private *dev_priv = dev->dev_private;
254 struct intel_crtc *intel_crtc =
255 to_intel_crtc(encoder->base.crtc);
256 enum dpio_channel ch = vlv_dport_to_channel(dport);
257 int pipe = intel_crtc->pipe;
258 int data, i, stagger;
259 u32 val;
260
261 mutex_lock(&dev_priv->sb_lock);
262
263 /* allow hardware to manage TX FIFO reset source */
264 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
265 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
266 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
267
268 if (intel_crtc->config->lane_count > 2) {
269 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
270 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
271 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
272 }
273
274 /* Program Tx lane latency optimal setting */
275 for (i = 0; i < intel_crtc->config->lane_count; i++) {
276 /* Set the upar bit */
277 if (intel_crtc->config->lane_count == 1)
278 data = 0x0;
279 else
280 data = (i == 1) ? 0x0 : 0x1;
281 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
282 data << DPIO_UPAR_SHIFT);
283 }
284
285 /* Data lane stagger programming */
286 if (intel_crtc->config->port_clock > 270000)
287 stagger = 0x18;
288 else if (intel_crtc->config->port_clock > 135000)
289 stagger = 0xd;
290 else if (intel_crtc->config->port_clock > 67500)
291 stagger = 0x7;
292 else if (intel_crtc->config->port_clock > 33750)
293 stagger = 0x4;
294 else
295 stagger = 0x2;
296
297 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
298 val |= DPIO_TX2_STAGGER_MASK(0x1f);
299 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
300
301 if (intel_crtc->config->lane_count > 2) {
302 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
303 val |= DPIO_TX2_STAGGER_MASK(0x1f);
304 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
305 }
306
307 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
308 DPIO_LANESTAGGER_STRAP(stagger) |
309 DPIO_LANESTAGGER_STRAP_OVRD |
310 DPIO_TX1_STAGGER_MASK(0x1f) |
311 DPIO_TX1_STAGGER_MULT(6) |
312 DPIO_TX2_STAGGER_MULT(0));
313
314 if (intel_crtc->config->lane_count > 2) {
315 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
316 DPIO_LANESTAGGER_STRAP(stagger) |
317 DPIO_LANESTAGGER_STRAP_OVRD |
318 DPIO_TX1_STAGGER_MASK(0x1f) |
319 DPIO_TX1_STAGGER_MULT(7) |
320 DPIO_TX2_STAGGER_MULT(5));
321 }
322
323 /* Deassert data lane reset */
324 chv_data_lane_soft_reset(encoder, false);
325
326 mutex_unlock(&dev_priv->sb_lock);
327}
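(Editor's illustration of the stagger table above; port_clock is in kHz, so e.g. HBR2 at 540000 selects 0x18 and RBR at 162000 selects 0xd:)

static int example_chv_lane_stagger(int port_clock)
{
	if (port_clock > 270000)
		return 0x18;
	else if (port_clock > 135000)
		return 0xd;
	else if (port_clock > 67500)
		return 0x7;
	else if (port_clock > 33750)
		return 0x4;
	else
		return 0x2;
}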
328
329void chv_phy_release_cl2_override(struct intel_encoder *encoder)
330{
331 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
332 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
333
334 if (dport->release_cl2_override) {
335 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
336 dport->release_cl2_override = false;
337 }
338}
339
340void chv_phy_post_pll_disable(struct intel_encoder *encoder)
341{
342 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
343 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
344 u32 val;
345
346 mutex_lock(&dev_priv->sb_lock);
347
348 /* disable left/right clock distribution */
349 if (pipe != PIPE_B) {
350 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
351 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
352 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
353 } else {
354 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
355 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
356 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
357 }
358
359 mutex_unlock(&dev_priv->sb_lock);
360
361 /*
362 * Leave the power down bit cleared for at least one
363 * lane so that chv_powergate_phy_ch() will power
364 * on something when the channel is otherwise unused.
365 * When the port is off and the override is removed
366 * the lanes power down anyway, so otherwise it doesn't
367 * really matter what the state of power down bits is
368 * after this.
369 */
370 chv_phy_powergate_lanes(encoder, false, 0x0);
371}
372
373void vlv_set_phy_signal_level(struct intel_encoder *encoder,
374 u32 demph_reg_value, u32 preemph_reg_value,
375 u32 uniqtranscale_reg_value, u32 tx3_demph)
376{
377 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
378 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
379 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
380 enum dpio_channel port = vlv_dport_to_channel(dport);
381 int pipe = intel_crtc->pipe;
382
383 mutex_lock(&dev_priv->sb_lock);
384 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
385 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
386 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
387 uniqtranscale_reg_value);
388 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
389
390 if (tx3_demph)
391 vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), tx3_demph);
392
393 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
394 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
395 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
396 mutex_unlock(&dev_priv->sb_lock);
397}
398
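/*
 * Caller-side sketch for vlv_set_phy_signal_level() above: the DP
 * link-training code picks per-(voltage swing, pre-emphasis) register
 * values from a table and passes them in. The constants below are
 * illustrative placeholders, not the driver's actual table entries.
 *
 *	vlv_set_phy_signal_level(encoder,
 *				 0x2b245555,	// demph
 *				 0x004000c0,	// preemph
 *				 0x552ab83a,	// uniqtranscale
 *				 0);		// tx3_demph: 0 = leave TX3 alone
 */
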
399void vlv_phy_pre_pll_enable(struct intel_encoder *encoder)
400{
401 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
402 struct drm_device *dev = encoder->base.dev;
403 struct drm_i915_private *dev_priv = dev->dev_private;
404 struct intel_crtc *intel_crtc =
405 to_intel_crtc(encoder->base.crtc);
406 enum dpio_channel port = vlv_dport_to_channel(dport);
407 int pipe = intel_crtc->pipe;
408
409 /* Program Tx lane resets to default */
410 mutex_lock(&dev_priv->sb_lock);
411 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
412 DPIO_PCS_TX_LANE2_RESET |
413 DPIO_PCS_TX_LANE1_RESET);
414 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
415 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
416 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
417 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
418 DPIO_PCS_CLK_SOFT_RESET);
419
420 /* Fix up inter-pair skew failure */
421 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
422 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
423 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
424 mutex_unlock(&dev_priv->sb_lock);
425}
426
427void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder)
428{
429 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
430 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
431 struct drm_device *dev = encoder->base.dev;
432 struct drm_i915_private *dev_priv = dev->dev_private;
433 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
434 enum dpio_channel port = vlv_dport_to_channel(dport);
435 int pipe = intel_crtc->pipe;
436 u32 val;
437
438 mutex_lock(&dev_priv->sb_lock);
439
440 /* Enable clock channels for this port */
441 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
442 val = 0;
443 if (pipe)
444 val |= (1<<21);
445 else
446 val &= ~(1<<21);
447 val |= 0x001000c4;
448 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
449
450 /* Program lane clock */
451 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
452 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
453
454 mutex_unlock(&dev_priv->sb_lock);
455}
456
457void vlv_phy_reset_lanes(struct intel_encoder *encoder)
458{
459 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
460 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
461 struct intel_crtc *intel_crtc =
462 to_intel_crtc(encoder->base.crtc);
463 enum dpio_channel port = vlv_dport_to_channel(dport);
464 int pipe = intel_crtc->pipe;
465
466 mutex_lock(&dev_priv->sb_lock);
467 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
468 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
469 mutex_unlock(&dev_priv->sb_lock);
470}
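
Every helper in the listing above shares one access pattern: take sb_lock, do a read-modify-write of a DPIO register over the sideband, drop the lock. A condensed sketch of that pattern (the wrapper name is hypothetical; vlv_dpio_read(), vlv_dpio_write() and sb_lock are the driver's own):

static void vlv_dpio_rmw(struct drm_i915_private *dev_priv, int pipe,
			 u32 reg, u32 clear, u32 set)
{
	u32 val;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	val &= ~clear;
	val |= set;
	vlv_dpio_write(dev_priv, pipe, reg, val);
	mutex_unlock(&dev_priv->sb_lock);
}
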
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 3ac705936b04..c0eff1571731 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -208,8 +208,8 @@ intel_find_shared_dpll(struct intel_crtc *crtc,
 			if (memcmp(&crtc_state->dpll_hw_state,
 				   &shared_dpll[i].hw_state,
 				   sizeof(crtc_state->dpll_hw_state)) == 0) {
-			DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, active %x)\n",
-				      crtc->base.base.id, pll->name,
+			DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
+				      crtc->base.base.id, crtc->base.name, pll->name,
 				      shared_dpll[i].crtc_mask,
 				      pll->active_mask);
 			return pll;
@@ -220,8 +220,8 @@ intel_find_shared_dpll(struct intel_crtc *crtc,
 	for (i = range_min; i <= range_max; i++) {
 		pll = &dev_priv->shared_dplls[i];
 		if (shared_dpll[i].crtc_mask == 0) {
-			DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
-				      crtc->base.base.id, pll->name);
+			DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n",
+				      crtc->base.base.id, crtc->base.name, pll->name);
 			return pll;
 		}
 	}
@@ -358,14 +358,17 @@ ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
 		i = (enum intel_dpll_id) crtc->pipe;
 		pll = &dev_priv->shared_dplls[i];

-		DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
-			      crtc->base.base.id, pll->name);
+		DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
+			      crtc->base.base.id, crtc->base.name, pll->name);
 	} else {
 		pll = intel_find_shared_dpll(crtc, crtc_state,
 					     DPLL_ID_PCH_PLL_A,
 					     DPLL_ID_PCH_PLL_B);
 	}

+	if (!pll)
+		return NULL;
+
 	/* reference the pll */
 	intel_reference_shared_dpll(pll, crtc_state);

@@ -1236,9 +1239,6 @@ skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
 	case 162000:
 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
 		break;
-	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
-	results in CDCLK change. Need to handle the change of CDCLK by
-	disabling pipes and re-enabling them */
 	case 108000:
 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
 		break;
@@ -1508,7 +1508,7 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
 	int clock = crtc_state->port_clock;

 	if (encoder->type == INTEL_OUTPUT_HDMI) {
-		intel_clock_t best_clock;
+		struct dpll best_clock;

 		/* Calculate HDMI div */
 		/*
@@ -1613,8 +1613,8 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
 	i = (enum intel_dpll_id) intel_dig_port->port;
 	pll = intel_get_shared_dpll_by_id(dev_priv, i);

-	DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
-		      crtc->base.base.id, pll->name);
+	DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
+		      crtc->base.base.id, crtc->base.name, pll->name);

 	intel_reference_shared_dpll(pll, crtc_state);

@@ -1633,18 +1633,10 @@ static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
 static void intel_ddi_pll_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t val = I915_READ(LCPLL_CTL);
-
-	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
-		int cdclk_freq;
-
-		cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
-		dev_priv->skl_boot_cdclk = cdclk_freq;
-		if (skl_sanitize_cdclk(dev_priv))
-			DRM_DEBUG_KMS("Sanitized cdclk programmed by pre-os\n");
-		if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
-			DRM_ERROR("LCPLL1 is disabled\n");
-	} else if (!IS_BROXTON(dev_priv)) {
+
+	if (INTEL_GEN(dev_priv) < 9) {
+		uint32_t val = I915_READ(LCPLL_CTL);
+
 		/*
 		 * The LCPLL register should be turned on by the BIOS. For now
 		 * let's just check its state and print errors in case
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a28b4aac1e02..089a42577ea3 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -242,14 +242,6 @@ struct intel_connector {
 	 * and active (i.e. dpms ON state). */
 	bool (*get_hw_state)(struct intel_connector *);

-	/*
-	 * Removes all interfaces through which the connector is accessible
-	 * - like sysfs, debugfs entries -, so that no new operations can be
-	 * started on the connector. Also makes sure all currently pending
-	 * operations finish before returing.
-	 */
-	void (*unregister)(struct intel_connector *);
-
 	/* Panel info for eDP and LVDS */
 	struct intel_panel panel;

@@ -266,7 +258,7 @@ struct intel_connector {
 	struct intel_dp *mst_port;
 };

-typedef struct dpll {
+struct dpll {
 	/* given values */
 	int n;
 	int m1, m2;
@@ -276,7 +268,7 @@ typedef struct dpll {
 	int vco;
 	int m;
 	int p;
-} intel_clock_t;
+};

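The typedef'd intel_clock_t becomes a plain struct dpll here, in line with kernel style (checkpatch discourages typedefs for plain structs). For illustration, a stripped-down, user-space sketch of how the "given" fields produce the "derived" ones — a simplified stand-in, not the driver's exact per-platform math (that lives in helpers like chv_calc_dpll_params()):

#include <stdio.h>

struct dpll {
	/* given values */
	int n;
	int m1, m2;
	int p1, p2;
	/* derived values */
	int dot;
	int vco;
	int m;
	int p;
};

/* Simplified illustration only: assumes m = m1 * m2 and p = p1 * p2. */
static void calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / clock->n;
	clock->dot = clock->vco / clock->p;
}

int main(void)
{
	struct dpll clock = { .n = 4, .m1 = 2, .m2 = 100, .p1 = 2, .p2 = 10 };

	calc_dpll_params(27000, &clock); /* 27 MHz refclk, in kHz */
	printf("vco %d kHz, dot %d kHz\n", clock.vco, clock.dot);
	return 0;
}
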
 struct intel_atomic_state {
 	struct drm_atomic_state base;
@@ -291,17 +283,32 @@ struct intel_atomic_state {

 	bool dpll_set, modeset;

+	/*
+	 * Does this transaction change the pipes that are active? This mask
+	 * tracks which CRTC's have changed their active state at the end of
+	 * the transaction (not counting the temporary disable during modesets).
+	 * This mask should only be non-zero when intel_state->modeset is true,
+	 * but the converse is not necessarily true; simply changing a mode may
+	 * not flip the final active status of any CRTC's
+	 */
+	unsigned int active_pipe_changes;
+
 	unsigned int active_crtcs;
 	unsigned int min_pixclk[I915_MAX_PIPES];

+	/* SKL/KBL Only */
+	unsigned int cdclk_pll_vco;
+
 	struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
-	struct intel_wm_config wm_config;

 	/*
 	 * Current watermarks can't be trusted during hardware readout, so
 	 * don't bother calculating intermediate watermarks.
 	 */
 	bool skip_intermediate_wm;
+
+	/* Gen9+ only */
+	struct skl_wm_values wm_results;
 };

 struct intel_plane_state {
@@ -405,6 +412,48 @@ struct skl_pipe_wm {
 	uint32_t linetime;
 };

+struct intel_crtc_wm_state {
+	union {
+		struct {
+			/*
+			 * Intermediate watermarks; these can be
+			 * programmed immediately since they satisfy
+			 * both the current configuration we're
+			 * switching away from and the new
+			 * configuration we're switching to.
+			 */
+			struct intel_pipe_wm intermediate;
+
+			/*
+			 * Optimal watermarks, programmed post-vblank
+			 * when this state is committed.
+			 */
+			struct intel_pipe_wm optimal;
+		} ilk;
+
+		struct {
+			/* gen9+ only needs 1-step wm programming */
+			struct skl_pipe_wm optimal;
+
+			/* cached plane data rate */
+			unsigned plane_data_rate[I915_MAX_PLANES];
+			unsigned plane_y_data_rate[I915_MAX_PLANES];
+
+			/* minimum block allocation */
+			uint16_t minimum_blocks[I915_MAX_PLANES];
+			uint16_t minimum_y_blocks[I915_MAX_PLANES];
+		} skl;
+	};
+
+	/*
+	 * Platforms with two-step watermark programming will need to
+	 * update watermark programming post-vblank to switch from the
+	 * safe intermediate watermarks to the optimal final
+	 * watermarks.
+	 */
+	bool need_postvbl_update;
+};
+
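The new intel_crtc_wm_state keeps the ILK-style two-step and SKL-style one-step watermark payloads in one union. A user-space sketch of the shape (pipe_wm is a stand-in for the kernel's intel_pipe_wm/skl_pipe_wm):

#include <stdbool.h>
#include <stdio.h>

struct pipe_wm { int level; };

struct crtc_wm_state {
	union {
		struct { struct pipe_wm intermediate, optimal; } ilk;
		struct { struct pipe_wm optimal; } skl;
	};
	bool need_postvbl_update;
};

int main(void)
{
	struct crtc_wm_state wm = { .ilk = { {1}, {3} } };

	/* Two-step (ILK-style) platforms program .intermediate now and
	 * .optimal after the vblank; gen9+ programs .skl.optimal once. */
	wm.need_postvbl_update = true;
	printf("intermediate %d, optimal %d\n",
	       wm.ilk.intermediate.level, wm.ilk.optimal.level);
	return 0;
}
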
 struct intel_crtc_state {
 	struct drm_crtc_state base;

@@ -522,6 +571,12 @@ struct intel_crtc_state {

 	uint8_t lane_count;

+	/*
+	 * Used by platforms having DP/HDMI PHY with programmable lane
+	 * latency optimization.
+	 */
+	uint8_t lane_lat_optim_mask;
+
 	/* Panel fitter controls for gen2-gen4 + VLV */
 	struct {
 		u32 control;
@@ -558,32 +613,7 @@ struct intel_crtc_state {
 	/* IVB sprite scaling w/a (WaCxSRDisabledForSpriteScaling:ivb) */
 	bool disable_lp_wm;

-	struct {
-		/*
-		 * Optimal watermarks, programmed post-vblank when this state
-		 * is committed.
-		 */
-		union {
-			struct intel_pipe_wm ilk;
-			struct skl_pipe_wm skl;
-		} optimal;
-
-		/*
-		 * Intermediate watermarks; these can be programmed immediately
-		 * since they satisfy both the current configuration we're
-		 * switching away from and the new configuration we're switching
-		 * to.
-		 */
-		struct intel_pipe_wm intermediate;
-
-		/*
-		 * Platforms with two-step watermark programming will need to
-		 * update watermark programming post-vblank to switch from the
-		 * safe intermediate watermarks to the optimal final
-		 * watermarks.
-		 */
-		bool need_postvbl_update;
-	} wm;
+	struct intel_crtc_wm_state wm;

 	/* Gamma mode programmed on the pipe */
 	uint32_t gamma_mode;
@@ -598,14 +628,6 @@ struct vlv_wm_state {
 	bool cxsr;
 };

-struct intel_mmio_flip {
-	struct work_struct work;
-	struct drm_i915_private *i915;
-	struct drm_i915_gem_request *req;
-	struct intel_crtc *crtc;
-	unsigned int rotation;
-};
-
 struct intel_crtc {
 	struct drm_crtc base;
 	enum pipe pipe;
@@ -620,7 +642,7 @@ struct intel_crtc {
 	unsigned long enabled_power_domains;
 	bool lowfreq_avail;
 	struct intel_overlay *overlay;
-	struct intel_unpin_work *unpin_work;
+	struct intel_flip_work *flip_work;

 	atomic_t unpin_work_count;

@@ -815,6 +837,7 @@ struct intel_dp {
 	uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
 	uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
 	uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
+	uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
 	/* sink rates as reported by DP_SUPPORTED_LINK_RATES */
 	uint8_t num_sink_rates;
 	int sink_rates[DP_MAX_SUPPORTED_RATES];
@@ -863,8 +886,6 @@ struct intel_dp {
 	/* This is called before a link training is starterd */
 	void (*prepare_link_retrain)(struct intel_dp *intel_dp);

-	bool train_set_valid;
-
 	/* Displayport compliance testing */
 	unsigned long compliance_test_type;
 	unsigned long compliance_test_data;
@@ -947,22 +968,21 @@ intel_get_crtc_for_plane(struct drm_device *dev, int plane)
 	return dev_priv->plane_to_crtc_mapping[plane];
 }

-struct intel_unpin_work {
-	struct work_struct work;
+struct intel_flip_work {
+	struct work_struct unpin_work;
+	struct work_struct mmio_work;
+
 	struct drm_crtc *crtc;
 	struct drm_framebuffer *old_fb;
 	struct drm_i915_gem_object *pending_flip_obj;
 	struct drm_pending_vblank_event *event;
 	atomic_t pending;
-#define INTEL_FLIP_INACTIVE	0
-#define INTEL_FLIP_PENDING	1
-#define INTEL_FLIP_COMPLETE	2
 	u32 flip_count;
 	u32 gtt_offset;
 	struct drm_i915_gem_request *flip_queued_req;
 	u32 flip_queued_vblank;
 	u32 flip_ready_vblank;
-	bool enable_stall_check;
+	unsigned int rotation;
 };
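The renamed intel_flip_work embeds two work items, so a single allocation serves both the CS and the MMIO flip paths (replacing the separate intel_mmio_flip removed above). A hedged sketch of the setup only — handler names here are hypothetical:

static struct intel_flip_work *alloc_flip_work(void)
{
	struct intel_flip_work *work = kzalloc(sizeof(*work), GFP_KERNEL);

	if (!work)
		return NULL;

	/* CS flips queue unpin_work from the flip-done interrupt;
	 * MMIO flips schedule mmio_work directly. */
	INIT_WORK(&work->unpin_work, intel_unpin_work_fn);
	INIT_WORK(&work->mmio_work, intel_mmio_flip_work_fn);
	return work;
}
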

 struct intel_load_detect_pipe {
@@ -1031,9 +1051,9 @@ void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void gen6_reset_rps_interrupts(struct drm_device *dev);
-void gen6_enable_rps_interrupts(struct drm_device *dev);
-void gen6_disable_rps_interrupts(struct drm_device *dev);
+void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
+void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv);
+void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv);
 u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask);
 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
@@ -1112,14 +1132,16 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv);
 void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);

 /* intel_display.c */
+void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco);
+void intel_update_rawclk(struct drm_i915_private *dev_priv);
 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
 		      const char *name, u32 reg, int ref_freq);
 extern const struct drm_plane_funcs intel_plane_funcs;
 void intel_init_display_hooks(struct drm_i915_private *dev_priv);
 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
 bool intel_has_pending_fb_unpin(struct drm_device *dev);
-void intel_mark_busy(struct drm_device *dev);
-void intel_mark_idle(struct drm_device *dev);
+void intel_mark_busy(struct drm_i915_private *dev_priv);
+void intel_mark_idle(struct drm_i915_private *dev_priv);
 void intel_crtc_restore_mode(struct drm_crtc *crtc);
 int intel_display_suspend(struct drm_device *dev);
 void intel_encoder_destroy(struct drm_encoder *encoder);
@@ -1128,7 +1150,6 @@ struct intel_connector *intel_connector_alloc(void);
 bool intel_connector_get_hw_state(struct intel_connector *connector);
 void intel_connector_attach_encoder(struct intel_connector *connector,
 				    struct intel_encoder *encoder);
-struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
 struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
 					     struct drm_crtc *crtc);
 enum pipe intel_get_pipe_from_connector(struct intel_connector *connector);
@@ -1151,6 +1172,9 @@ intel_wait_for_vblank_if_active(struct drm_device *dev, int pipe)
 	if (crtc->active)
 		intel_wait_for_vblank(dev, pipe);
 }
+
+u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
+
 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
 			 struct intel_digital_port *dport,
@@ -1164,14 +1188,14 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
 				    struct drm_modeset_acquire_ctx *ctx);
 int intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
 			       unsigned int rotation);
+void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
 struct drm_framebuffer *
 __intel_framebuffer_create(struct drm_device *dev,
 			   struct drm_mode_fb_cmd2 *mode_cmd,
 			   struct drm_i915_gem_object *obj);
-void intel_prepare_page_flip(struct drm_device *dev, int plane);
-void intel_finish_page_flip(struct drm_device *dev, int pipe);
-void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
-void intel_check_page_flip(struct drm_device *dev, int pipe);
+void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe);
+void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe);
+void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe);
 int intel_prepare_plane_fb(struct drm_plane *plane,
 			   const struct drm_plane_state *new_state);
 void intel_cleanup_plane_fb(struct drm_plane *plane,
@@ -1228,23 +1252,25 @@ u32 intel_compute_tile_offset(int *x, int *y,
 			      const struct drm_framebuffer *fb, int plane,
 			      unsigned int pitch,
 			      unsigned int rotation);
-void intel_prepare_reset(struct drm_device *dev);
-void intel_finish_reset(struct drm_device *dev);
+void intel_prepare_reset(struct drm_i915_private *dev_priv);
+void intel_finish_reset(struct drm_i915_private *dev_priv);
 void hsw_enable_pc8(struct drm_i915_private *dev_priv);
 void hsw_disable_pc8(struct drm_i915_private *dev_priv);
-void broxton_init_cdclk(struct drm_i915_private *dev_priv);
-void broxton_uninit_cdclk(struct drm_i915_private *dev_priv);
-bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv);
-void broxton_ddi_phy_init(struct drm_i915_private *dev_priv);
-void broxton_ddi_phy_uninit(struct drm_i915_private *dev_priv);
-void broxton_ddi_phy_verify_state(struct drm_i915_private *dev_priv);
+void bxt_init_cdclk(struct drm_i915_private *dev_priv);
+void bxt_uninit_cdclk(struct drm_i915_private *dev_priv);
+void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
+void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
+bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
+			    enum dpio_phy phy);
+bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
+			      enum dpio_phy phy);
 void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
 void bxt_enable_dc9(struct drm_i915_private *dev_priv);
 void bxt_disable_dc9(struct drm_i915_private *dev_priv);
 void gen9_enable_dc5(struct drm_i915_private *dev_priv);
 void skl_init_cdclk(struct drm_i915_private *dev_priv);
-int skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
 void skl_uninit_cdclk(struct drm_i915_private *dev_priv);
+unsigned int skl_cdclk_get_vco(unsigned int freq);
 void skl_enable_dc6(struct drm_i915_private *dev_priv);
 void skl_disable_dc6(struct drm_i915_private *dev_priv);
 void intel_dp_get_m_n(struct intel_crtc *crtc,
@@ -1252,8 +1278,8 @@ void intel_dp_get_m_n(struct intel_crtc *crtc,
 void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n);
 int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
-			intel_clock_t *best_clock);
-int chv_calc_dpll_params(int refclk, intel_clock_t *pll_clock);
+			struct dpll *best_clock);
+int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);

 bool intel_crtc_active(struct drm_crtc *crtc);
 void hsw_enable_ips(struct intel_crtc *crtc);
@@ -1284,7 +1310,7 @@ void intel_csr_ucode_suspend(struct drm_i915_private *);
 void intel_csr_ucode_resume(struct drm_i915_private *);

 /* intel_dp.c */
-void intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port);
+bool intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port);
 bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 			     struct intel_connector *intel_connector);
 void intel_dp_set_link_params(struct intel_dp *intel_dp,
@@ -1339,12 +1365,22 @@ bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
 bool
 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]);

+static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
+{
+	return ~((1 << lane_count) - 1) & 0xf;
+}
+
+/* intel_dp_aux_backlight.c */
+int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);
+
 /* intel_dp_mst.c */
 int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
 void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
 /* intel_dsi.c */
 void intel_dsi_init(struct drm_device *dev);

+/* intel_dsi_dcs_backlight.c */
+int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector);

 /* intel_dvo.c */
 void intel_dvo_init(struct drm_device *dev);
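The new intel_dp_unused_lane_mask() above computes a 4-bit mask of the PHY lanes a configuration leaves unused. A standalone check of its arithmetic:

#include <assert.h>
#include <stdio.h>

/* Same computation as intel_dp_unused_lane_mask() above. */
static unsigned int unused_lane_mask(int lane_count)
{
	return ~((1 << lane_count) - 1) & 0xf;
}

int main(void)
{
	assert(unused_lane_mask(1) == 0xe);
	assert(unused_lane_mask(2) == 0xc);
	assert(unused_lane_mask(4) == 0x0);
	printf("1 lane -> 0x%x, 2 lanes -> 0x%x, 4 lanes -> 0x%x\n",
	       unused_lane_mask(1), unused_lane_mask(2), unused_lane_mask(4));
	return 0;
}
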
@@ -1385,11 +1421,15 @@ static inline void intel_fbdev_restore_mode(struct drm_device *dev)
 void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
 			   struct drm_atomic_state *state);
 bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
-void intel_fbc_pre_update(struct intel_crtc *crtc);
+void intel_fbc_pre_update(struct intel_crtc *crtc,
+			  struct intel_crtc_state *crtc_state,
+			  struct intel_plane_state *plane_state);
 void intel_fbc_post_update(struct intel_crtc *crtc);
 void intel_fbc_init(struct drm_i915_private *dev_priv);
 void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv);
-void intel_fbc_enable(struct intel_crtc *crtc);
+void intel_fbc_enable(struct intel_crtc *crtc,
+		      struct intel_crtc_state *crtc_state,
+		      struct intel_plane_state *plane_state);
 void intel_fbc_disable(struct intel_crtc *crtc);
 void intel_fbc_global_disable(struct drm_i915_private *dev_priv);
 void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
@@ -1424,13 +1464,13 @@ void intel_attach_aspect_ratio_property(struct drm_connector *connector);


 /* intel_overlay.c */
-void intel_setup_overlay(struct drm_device *dev);
-void intel_cleanup_overlay(struct drm_device *dev);
+void intel_setup_overlay(struct drm_i915_private *dev_priv);
+void intel_cleanup_overlay(struct drm_i915_private *dev_priv);
 int intel_overlay_switch_off(struct intel_overlay *overlay);
-int intel_overlay_put_image(struct drm_device *dev, void *data,
-			    struct drm_file *file_priv);
-int intel_overlay_attrs(struct drm_device *dev, void *data,
-			struct drm_file *file_priv);
+int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file_priv);
+int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv);
 void intel_overlay_reset(struct drm_i915_private *dev_priv);


@@ -1459,7 +1499,14 @@ extern struct drm_display_mode *intel_find_panel_downclock(
 		struct drm_display_mode *fixed_mode,
 		struct drm_connector *connector);
 void intel_backlight_register(struct drm_device *dev);
-void intel_backlight_unregister(struct drm_device *dev);
+
+#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
+void intel_backlight_device_unregister(struct intel_connector *connector);
+#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
+static inline void intel_backlight_device_unregister(struct intel_connector *connector)
+{
+}
+#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */


 /* intel_psr.c */
@@ -1601,21 +1648,20 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
 void intel_pm_setup(struct drm_device *dev);
 void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
 void intel_gpu_ips_teardown(void);
-void intel_init_gt_powersave(struct drm_device *dev);
-void intel_cleanup_gt_powersave(struct drm_device *dev);
-void intel_enable_gt_powersave(struct drm_device *dev);
-void intel_disable_gt_powersave(struct drm_device *dev);
-void intel_suspend_gt_powersave(struct drm_device *dev);
-void intel_reset_gt_powersave(struct drm_device *dev);
-void gen6_update_ring_freq(struct drm_device *dev);
+void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_reset_gt_powersave(struct drm_i915_private *dev_priv);
+void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
 void gen6_rps_busy(struct drm_i915_private *dev_priv);
 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
 void gen6_rps_idle(struct drm_i915_private *dev_priv);
 void gen6_rps_boost(struct drm_i915_private *dev_priv,
 		    struct intel_rps_client *rps,
 		    unsigned long submitted);
-void intel_queue_rps_boost_for_request(struct drm_device *dev,
-				       struct drm_i915_gem_request *req);
+void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req);
 void vlv_wm_get_hw_state(struct drm_device *dev);
 void ilk_wm_get_hw_state(struct drm_device *dev);
 void skl_wm_get_hw_state(struct drm_device *dev);
@@ -1623,7 +1669,11 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
 		       struct skl_ddb_allocation *ddb /* out */);
 uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
 bool ilk_disable_lp_wm(struct drm_device *dev);
-int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6);
+int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
+static inline int intel_enable_rc6(void)
+{
+	return i915.enable_rc6;
+}

 /* intel_sdvo.c */
 bool intel_sdvo_init(struct drm_device *dev,
@@ -1635,7 +1685,7 @@ int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
 int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv);
 void intel_pipe_update_start(struct intel_crtc *crtc);
-void intel_pipe_update_end(struct intel_crtc *crtc);
+void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work);

 /* intel_tv.c */
 void intel_tv_init(struct drm_device *dev);
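
The intel_backlight_device_unregister() stub above follows the usual kernel pattern: when the config option is off, the declaration collapses into an empty static inline, so callers never need #ifdefs. A minimal standalone illustration (FEATURE_X stands in for CONFIG_BACKLIGHT_CLASS_DEVICE):

#include <stdio.h>

#if defined(FEATURE_X)
void feature_unregister(void) { printf("real unregister\n"); }
#else
static inline void feature_unregister(void) { }
#endif

int main(void)
{
	feature_unregister(); /* always safe to call, compiled either way */
	return 0;
}
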
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 366ad6c67ce4..b444d0e35a98 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -532,7 +532,6 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 	enum port port;
-	u32 tmp;

 	DRM_DEBUG_KMS("\n");

@@ -551,11 +550,13 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)

 	msleep(intel_dsi->panel_on_delay);

-	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+		u32 val;
+
 		/* Disable DPOunit clock gating, can stall pipe */
-		tmp = I915_READ(DSPCLK_GATE_D);
-		tmp |= DPOUNIT_CLOCK_GATE_DISABLE;
-		I915_WRITE(DSPCLK_GATE_D, tmp);
+		val = I915_READ(DSPCLK_GATE_D);
+		val |= DPOUNIT_CLOCK_GATE_DISABLE;
+		I915_WRITE(DSPCLK_GATE_D, val);
 	}

 	/* put device in ready state */
@@ -693,7 +694,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder)

 	intel_dsi_clear_device_ready(encoder);

-	if (!IS_BROXTON(dev_priv)) {
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		u32 val;

 		val = I915_READ(DSPCLK_GATE_D);
@@ -1171,6 +1172,12 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
 	if (intel_dsi->clock_stop)
 		tmp |= CLOCKSTOP;

+	if (IS_BROXTON(dev_priv)) {
+		tmp |= BXT_DPHY_DEFEATURE_EN;
+		if (!is_cmd_mode(intel_dsi))
+			tmp |= BXT_DEFEATURE_DPI_FIFO_CTR;
+	}
+
 	for_each_dsi_port(port, intel_dsi->ports) {
 		I915_WRITE(MIPI_DSI_FUNC_PRG(port), val);

@@ -1378,12 +1385,12 @@ static const struct drm_encoder_funcs intel_dsi_funcs = {
 static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs = {
 	.get_modes = intel_dsi_get_modes,
 	.mode_valid = intel_dsi_mode_valid,
-	.best_encoder = intel_best_encoder,
 };

 static const struct drm_connector_funcs intel_dsi_connector_funcs = {
 	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = intel_dsi_detect,
+	.early_unregister = intel_connector_unregister,
 	.destroy = intel_dsi_connector_destroy,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = intel_dsi_set_property,
@@ -1449,7 +1456,7 @@ void intel_dsi_init(struct drm_device *dev)
 	connector = &intel_connector->base;

 	drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI,
-			 NULL);
+			 "DSI %c", port_name(port));

 	intel_encoder->compute_config = intel_dsi_compute_config;
 	intel_encoder->pre_enable = intel_dsi_pre_enable;
@@ -1460,7 +1467,6 @@ void intel_dsi_init(struct drm_device *dev)
 	intel_encoder->get_config = intel_dsi_get_config;

 	intel_connector->get_hw_state = intel_connector_get_hw_state;
-	intel_connector->unregister = intel_connector_unregister;

 	/*
 	 * On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI
@@ -1473,10 +1479,42 @@ void intel_dsi_init(struct drm_device *dev)
 	else
 		intel_encoder->crtc_mask = BIT(PIPE_B);

-	if (dev_priv->vbt.dsi.config->dual_link)
+	if (dev_priv->vbt.dsi.config->dual_link) {
 		intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C);
-	else
+
+		switch (dev_priv->vbt.dsi.config->dl_dcs_backlight_ports) {
+		case DL_DCS_PORT_A:
+			intel_dsi->dcs_backlight_ports = BIT(PORT_A);
+			break;
+		case DL_DCS_PORT_C:
+			intel_dsi->dcs_backlight_ports = BIT(PORT_C);
+			break;
+		default:
+		case DL_DCS_PORT_A_AND_C:
+			intel_dsi->dcs_backlight_ports = BIT(PORT_A) | BIT(PORT_C);
+			break;
+		}
+
+		switch (dev_priv->vbt.dsi.config->dl_dcs_cabc_ports) {
+		case DL_DCS_PORT_A:
+			intel_dsi->dcs_cabc_ports = BIT(PORT_A);
+			break;
+		case DL_DCS_PORT_C:
+			intel_dsi->dcs_cabc_ports = BIT(PORT_C);
+			break;
+		default:
+		case DL_DCS_PORT_A_AND_C:
+			intel_dsi->dcs_cabc_ports = BIT(PORT_A) | BIT(PORT_C);
+			break;
+		}
+	} else {
 		intel_dsi->ports = BIT(port);
+		intel_dsi->dcs_backlight_ports = BIT(port);
+		intel_dsi->dcs_cabc_ports = BIT(port);
+	}
+
+	if (!dev_priv->vbt.dsi.config->cabc_supported)
+		intel_dsi->dcs_cabc_ports = 0;

 	/* Create a DSI host (and a device) for each port. */
 	for_each_dsi_port(port, intel_dsi->ports) {
@@ -1545,6 +1583,9 @@ void intel_dsi_init(struct drm_device *dev)
 		goto err;
 	}

+	connector->display_info.width_mm = fixed_mode->width_mm;
+	connector->display_info.height_mm = fixed_mode->height_mm;
+
 	intel_panel_init(&intel_connector->panel, fixed_mode, NULL);

 	intel_dsi_add_properties(intel_connector);
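
In the dual-link branch added above, the VBT says which DSI port(s) receive DCS backlight and CABC commands. A standalone restatement of that mapping (the enum values here are stand-ins for illustration; the real ones come from the VBT definitions in intel_bios.h):

#include <stdio.h>

#define BIT(n) (1u << (n))
enum port { PORT_A, PORT_B, PORT_C };
enum { DL_DCS_PORT_A, DL_DCS_PORT_C, DL_DCS_PORT_A_AND_C };

static unsigned int dcs_port_mask(int dl_dcs_ports)
{
	switch (dl_dcs_ports) {
	case DL_DCS_PORT_A:
		return BIT(PORT_A);
	case DL_DCS_PORT_C:
		return BIT(PORT_C);
	case DL_DCS_PORT_A_AND_C:
	default:
		return BIT(PORT_A) | BIT(PORT_C);
	}
}

int main(void)
{
	printf("A -> 0x%x, C -> 0x%x, A&C -> 0x%x\n",
	       dcs_port_mask(DL_DCS_PORT_A),
	       dcs_port_mask(DL_DCS_PORT_C),
	       dcs_port_mask(DL_DCS_PORT_A_AND_C));
	return 0;
}
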
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index 61a6957fc6c2..5967ea6d6045 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -78,6 +78,10 @@ struct intel_dsi {

 	u8 escape_clk_div;
 	u8 dual_link;
+
+	u16 dcs_backlight_ports;
+	u16 dcs_cabc_ports;
+
 	u8 pixel_overlap;
 	u32 port_bits;
 	u32 bw_timer;
diff --git a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
new file mode 100644
index 000000000000..f0dc427743f8
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
@@ -0,0 +1,179 @@
1/*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Author: Deepak M <m.deepak at intel.com>
24 */
25
26#include "intel_drv.h"
27#include "intel_dsi.h"
28#include "i915_drv.h"
29#include <video/mipi_display.h>
30#include <drm/drm_mipi_dsi.h>
31
32#define CONTROL_DISPLAY_BCTRL (1 << 5)
33#define CONTROL_DISPLAY_DD (1 << 3)
34#define CONTROL_DISPLAY_BL (1 << 2)
35
36#define POWER_SAVE_OFF (0 << 0)
37#define POWER_SAVE_LOW (1 << 0)
38#define POWER_SAVE_MEDIUM (2 << 0)
39#define POWER_SAVE_HIGH (3 << 0)
40#define POWER_SAVE_OUTDOOR_MODE (4 << 0)
41
42#define PANEL_PWM_MAX_VALUE 0xFF
43
44static u32 dcs_get_backlight(struct intel_connector *connector)
45{
46 struct intel_encoder *encoder = connector->encoder;
47 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
48 struct mipi_dsi_device *dsi_device;
49 u8 data;
50 enum port port;
51
52 /* FIXME: Need to take care of 16 bit brightness level */
53 for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
54 dsi_device = intel_dsi->dsi_hosts[port]->device;
55 mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_DISPLAY_BRIGHTNESS,
56 &data, sizeof(data));
57 break;
58 }
59
60 return data;
61}
62
63static void dcs_set_backlight(struct intel_connector *connector, u32 level)
64{
65 struct intel_encoder *encoder = connector->encoder;
66 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
67 struct mipi_dsi_device *dsi_device;
68 u8 data = level;
69 enum port port;
70
71 /* FIXME: Need to take care of 16 bit brightness level */
72 for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
73 dsi_device = intel_dsi->dsi_hosts[port]->device;
74 mipi_dsi_dcs_write(dsi_device, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
75 &data, sizeof(data));
76 }
77}
78
79static void dcs_disable_backlight(struct intel_connector *connector)
80{
81 struct intel_encoder *encoder = connector->encoder;
82 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
83 struct mipi_dsi_device *dsi_device;
84 enum port port;
85
86 dcs_set_backlight(connector, 0);
87
88 for_each_dsi_port(port, intel_dsi->dcs_cabc_ports) {
89 u8 cabc = POWER_SAVE_OFF;
90
91 dsi_device = intel_dsi->dsi_hosts[port]->device;
92 mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_POWER_SAVE,
93 &cabc, sizeof(cabc));
94 }
95
96 for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
97 u8 ctrl = 0;
98
99 dsi_device = intel_dsi->dsi_hosts[port]->device;
100
101 mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_CONTROL_DISPLAY,
102 &ctrl, sizeof(ctrl));
103
104 ctrl &= ~CONTROL_DISPLAY_BL;
105 ctrl &= ~CONTROL_DISPLAY_DD;
106 ctrl &= ~CONTROL_DISPLAY_BCTRL;
107
108 mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_CONTROL_DISPLAY,
109 &ctrl, sizeof(ctrl));
110 }
111}
112
113static void dcs_enable_backlight(struct intel_connector *connector)
114{
115 struct intel_encoder *encoder = connector->encoder;
116 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
117 struct intel_panel *panel = &connector->panel;
118 struct mipi_dsi_device *dsi_device;
119 enum port port;
120
121 for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
122 u8 ctrl = 0;
123
124 dsi_device = intel_dsi->dsi_hosts[port]->device;
125
126 mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_CONTROL_DISPLAY,
127 &ctrl, sizeof(ctrl));
128
129 ctrl |= CONTROL_DISPLAY_BL;
130 ctrl |= CONTROL_DISPLAY_DD;
131 ctrl |= CONTROL_DISPLAY_BCTRL;
132
133 mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_CONTROL_DISPLAY,
134 &ctrl, sizeof(ctrl));
135 }
136
137 for_each_dsi_port(port, intel_dsi->dcs_cabc_ports) {
138 u8 cabc = POWER_SAVE_MEDIUM;
139
140 dsi_device = intel_dsi->dsi_hosts[port]->device;
141 mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_POWER_SAVE,
142 &cabc, sizeof(cabc));
143 }
144
145 dcs_set_backlight(connector, panel->backlight.level);
146}
147
148static int dcs_setup_backlight(struct intel_connector *connector,
149 enum pipe unused)
150{
151 struct intel_panel *panel = &connector->panel;
152
153 panel->backlight.max = PANEL_PWM_MAX_VALUE;
154 panel->backlight.level = PANEL_PWM_MAX_VALUE;
155
156 return 0;
157}
158
159int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector)
160{
161 struct drm_device *dev = intel_connector->base.dev;
162 struct drm_i915_private *dev_priv = dev->dev_private;
163 struct intel_encoder *encoder = intel_connector->encoder;
164 struct intel_panel *panel = &intel_connector->panel;
165
166 if (dev_priv->vbt.backlight.type != INTEL_BACKLIGHT_DSI_DCS)
167 return -ENODEV;
168
169 if (WARN_ON(encoder->type != INTEL_OUTPUT_DSI))
170 return -EINVAL;
171
172 panel->backlight.setup = dcs_setup_backlight;
173 panel->backlight.enable = dcs_enable_backlight;
174 panel->backlight.disable = dcs_disable_backlight;
175 panel->backlight.set = dcs_set_backlight;
176 panel->backlight.get = dcs_get_backlight;
177
178 return 0;
179}
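
Both FIXMEs above note that only 8-bit brightness is handled so far. As a hedged sketch only — not part of the patch — a 16-bit variant could send the two-byte MIPI_DCS_SET_DISPLAY_BRIGHTNESS payload; the byte order a given panel expects varies, and the DCS-specified big-endian order is assumed here:

static void dcs_set_backlight_16(struct intel_connector *connector, u16 level)
{
	struct intel_dsi *intel_dsi =
		enc_to_intel_dsi(&connector->encoder->base);
	u8 data[2] = { level >> 8, level & 0xff };
	enum port port;

	for_each_dsi_port(port, intel_dsi->dcs_backlight_ports)
		mipi_dsi_dcs_write(intel_dsi->dsi_hosts[port]->device,
				   MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
				   data, sizeof(data));
}
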
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index e498f1c3221e..f122484bedfc 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -95,6 +95,24 @@ static struct gpio_map vlv_gpio_table[] = {
 	{ VLV_GPIO_NC_11_PANEL1_BKLTCTL },
 };

+#define CHV_GPIO_IDX_START_N		0
+#define CHV_GPIO_IDX_START_E		73
+#define CHV_GPIO_IDX_START_SW		100
+#define CHV_GPIO_IDX_START_SE		198
+
+#define CHV_VBT_MAX_PINS_PER_FMLY	15
+
+#define CHV_GPIO_PAD_CFG0(f, i)		(0x4400 + (f) * 0x400 + (i) * 8)
+#define  CHV_GPIO_GPIOEN		(1 << 15)
+#define  CHV_GPIO_GPIOCFG_GPIO		(0 << 8)
+#define  CHV_GPIO_GPIOCFG_GPO		(1 << 8)
+#define  CHV_GPIO_GPIOCFG_GPI		(2 << 8)
+#define  CHV_GPIO_GPIOCFG_HIZ		(3 << 8)
+#define  CHV_GPIO_GPIOTXSTATE(state)	((!!(state)) << 1)
+
+#define CHV_GPIO_PAD_CFG1(f, i)		(0x4400 + (f) * 0x400 + (i) * 8 + 4)
+#define  CHV_GPIO_CFGLOCK		(1 << 31)
+
 static inline enum port intel_dsi_seq_port_to_port(u8 port)
 {
 	return port ? PORT_C : PORT_A;
@@ -203,13 +221,14 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
 	map = &vlv_gpio_table[gpio_index];

 	if (dev_priv->vbt.dsi.seq_version >= 3) {
-		DRM_DEBUG_KMS("GPIO element v3 not supported\n");
-		return;
+		/* XXX: this assumes vlv_gpio_table only has NC GPIOs. */
+		port = IOSF_PORT_GPIO_NC;
 	} else {
 		if (gpio_source == 0) {
 			port = IOSF_PORT_GPIO_NC;
 		} else if (gpio_source == 1) {
-			port = IOSF_PORT_GPIO_SC;
+			DRM_DEBUG_KMS("SC gpio not supported\n");
+			return;
 		} else {
 			DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source);
 			return;
@@ -231,6 +250,56 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
 	mutex_unlock(&dev_priv->sb_lock);
 }

+static void chv_exec_gpio(struct drm_i915_private *dev_priv,
+			  u8 gpio_source, u8 gpio_index, bool value)
+{
+	u16 cfg0, cfg1;
+	u16 family_num;
+	u8 port;
+
+	if (dev_priv->vbt.dsi.seq_version >= 3) {
+		if (gpio_index >= CHV_GPIO_IDX_START_SE) {
+			/* XXX: it's unclear whether 255->57 is part of SE. */
+			gpio_index -= CHV_GPIO_IDX_START_SE;
+			port = CHV_IOSF_PORT_GPIO_SE;
+		} else if (gpio_index >= CHV_GPIO_IDX_START_SW) {
+			gpio_index -= CHV_GPIO_IDX_START_SW;
+			port = CHV_IOSF_PORT_GPIO_SW;
+		} else if (gpio_index >= CHV_GPIO_IDX_START_E) {
+			gpio_index -= CHV_GPIO_IDX_START_E;
+			port = CHV_IOSF_PORT_GPIO_E;
+		} else {
+			port = CHV_IOSF_PORT_GPIO_N;
+		}
+	} else {
+		/* XXX: The spec is unclear about CHV GPIO on seq v2 */
+		if (gpio_source != 0) {
+			DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source);
+			return;
+		}
+
+		if (gpio_index >= CHV_GPIO_IDX_START_E) {
+			DRM_DEBUG_KMS("invalid gpio index %u for GPIO N\n",
+				      gpio_index);
+			return;
+		}
+
+		port = CHV_IOSF_PORT_GPIO_N;
+	}
+
+	family_num = gpio_index / CHV_VBT_MAX_PINS_PER_FMLY;
+	gpio_index = gpio_index % CHV_VBT_MAX_PINS_PER_FMLY;
+
+	cfg0 = CHV_GPIO_PAD_CFG0(family_num, gpio_index);
+	cfg1 = CHV_GPIO_PAD_CFG1(family_num, gpio_index);
+
+	mutex_lock(&dev_priv->sb_lock);
+	vlv_iosf_sb_write(dev_priv, port, cfg1, 0);
+	vlv_iosf_sb_write(dev_priv, port, cfg0,
+			  CHV_GPIO_GPIOCFG_GPO | CHV_GPIO_GPIOTXSTATE(value));
+	mutex_unlock(&dev_priv->sb_lock);
+}
+
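Worked example of the pad-offset arithmetic in chv_exec_gpio() above: pads sit in families of 15, each family spans 0x400 bytes, and every pad has an 8-byte CFG0/CFG1 pair starting at 0x4400:

#include <stdio.h>

#define CHV_VBT_MAX_PINS_PER_FMLY	15
#define CHV_GPIO_PAD_CFG0(f, i)		(0x4400 + (f) * 0x400 + (i) * 8)
#define CHV_GPIO_PAD_CFG1(f, i)		(0x4400 + (f) * 0x400 + (i) * 8 + 4)

int main(void)
{
	unsigned int gpio_index = 23; /* arbitrary pad in the N community */
	unsigned int family = gpio_index / CHV_VBT_MAX_PINS_PER_FMLY; /* 1 */
	unsigned int index = gpio_index % CHV_VBT_MAX_PINS_PER_FMLY;  /* 8 */

	/* prints: cfg0 0x4840 cfg1 0x4844 */
	printf("cfg0 0x%x cfg1 0x%x\n",
	       CHV_GPIO_PAD_CFG0(family, index),
	       CHV_GPIO_PAD_CFG1(family, index));
	return 0;
}
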
 static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
 {
 	struct drm_device *dev = intel_dsi->base.base.dev;
@@ -254,6 +323,8 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)

 	if (IS_VALLEYVIEW(dev_priv))
 		vlv_exec_gpio(dev_priv, gpio_source, gpio_index, value);
+	else if (IS_CHERRYVIEW(dev_priv))
+		chv_exec_gpio(dev_priv, gpio_source, gpio_index, value);
 	else
 		DRM_DEBUG_KMS("GPIO element not supported on this platform\n");

diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 286baec979c8..60e4ddf2ec6d 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -341,6 +341,7 @@ static void intel_dvo_destroy(struct drm_connector *connector)
 static const struct drm_connector_funcs intel_dvo_connector_funcs = {
 	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = intel_dvo_detect,
+	.early_unregister = intel_connector_unregister,
 	.destroy = intel_dvo_destroy,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.atomic_get_property = intel_connector_atomic_get_property,
@@ -351,7 +352,6 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = {
351static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = { 352static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
352 .mode_valid = intel_dvo_mode_valid, 353 .mode_valid = intel_dvo_mode_valid,
353 .get_modes = intel_dvo_get_modes, 354 .get_modes = intel_dvo_get_modes,
354 .best_encoder = intel_best_encoder,
355}; 355};
356 356
357static void intel_dvo_enc_destroy(struct drm_encoder *encoder) 357static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
@@ -406,6 +406,18 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
406 return mode; 406 return mode;
407} 407}
408 408
409static char intel_dvo_port_name(i915_reg_t dvo_reg)
410{
411 if (i915_mmio_reg_equal(dvo_reg, DVOA))
412 return 'A';
413 else if (i915_mmio_reg_equal(dvo_reg, DVOB))
414 return 'B';
415 else if (i915_mmio_reg_equal(dvo_reg, DVOC))
416 return 'C';
417 else
418 return '?';
419}
420
409void intel_dvo_init(struct drm_device *dev) 421void intel_dvo_init(struct drm_device *dev)
410{ 422{
411 struct drm_i915_private *dev_priv = dev->dev_private; 423 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -428,8 +440,6 @@ void intel_dvo_init(struct drm_device *dev)
428 intel_dvo->attached_connector = intel_connector; 440 intel_dvo->attached_connector = intel_connector;
429 441
430 intel_encoder = &intel_dvo->base; 442 intel_encoder = &intel_dvo->base;
431 drm_encoder_init(dev, &intel_encoder->base,
432 &intel_dvo_enc_funcs, encoder_type, NULL);
433 443
434 intel_encoder->disable = intel_disable_dvo; 444 intel_encoder->disable = intel_disable_dvo;
435 intel_encoder->enable = intel_enable_dvo; 445 intel_encoder->enable = intel_enable_dvo;
@@ -438,7 +448,6 @@ void intel_dvo_init(struct drm_device *dev)
438 intel_encoder->compute_config = intel_dvo_compute_config; 448 intel_encoder->compute_config = intel_dvo_compute_config;
439 intel_encoder->pre_enable = intel_dvo_pre_enable; 449 intel_encoder->pre_enable = intel_dvo_pre_enable;
440 intel_connector->get_hw_state = intel_dvo_connector_get_hw_state; 450 intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
441 intel_connector->unregister = intel_connector_unregister;
442 451
443 /* Now, try to find a controller */ 452 /* Now, try to find a controller */
444 for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { 453 for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
@@ -496,6 +505,10 @@ void intel_dvo_init(struct drm_device *dev)
496 if (!dvoinit) 505 if (!dvoinit)
497 continue; 506 continue;
498 507
508 drm_encoder_init(dev, &intel_encoder->base,
509 &intel_dvo_enc_funcs, encoder_type,
510 "DVO %c", intel_dvo_port_name(dvo->dvo_reg));
511
499 intel_encoder->type = INTEL_OUTPUT_DVO; 512 intel_encoder->type = INTEL_OUTPUT_DVO;
500 intel_encoder->crtc_mask = (1 << 0) | (1 << 1); 513 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
501 switch (dvo->type) { 514 switch (dvo->type) {
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index d5a7cfec589b..a19944b6dc25 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -374,8 +374,9 @@ static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
374 * @dev_priv: i915 device instance 374 * @dev_priv: i915 device instance
375 * 375 *
376 * This function is used to verify the current state of FBC. 376 * This function is used to verify the current state of FBC.
377 *
377 * FIXME: This should be tracked in the plane config eventually 378 * FIXME: This should be tracked in the plane config eventually
378 * instead of queried at runtime for most callers. 379 * instead of queried at runtime for most callers.
379 */ 380 */
380bool intel_fbc_is_active(struct drm_i915_private *dev_priv) 381bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
381{ 382{
@@ -480,10 +481,10 @@ static void intel_fbc_deactivate(struct drm_i915_private *dev_priv)
480 intel_fbc_hw_deactivate(dev_priv); 481 intel_fbc_hw_deactivate(dev_priv);
481} 482}
482 483
483static bool multiple_pipes_ok(struct intel_crtc *crtc) 484static bool multiple_pipes_ok(struct intel_crtc *crtc,
485 struct intel_plane_state *plane_state)
484{ 486{
485 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 487 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
486 struct drm_plane *primary = crtc->base.primary;
487 struct intel_fbc *fbc = &dev_priv->fbc; 488 struct intel_fbc *fbc = &dev_priv->fbc;
488 enum pipe pipe = crtc->pipe; 489 enum pipe pipe = crtc->pipe;
489 490
@@ -491,9 +492,7 @@ static bool multiple_pipes_ok(struct intel_crtc *crtc)
491 if (!no_fbc_on_multiple_pipes(dev_priv)) 492 if (!no_fbc_on_multiple_pipes(dev_priv))
492 return true; 493 return true;
493 494
494 WARN_ON(!drm_modeset_is_locked(&primary->mutex)); 495 if (plane_state->visible)
495
496 if (to_intel_plane_state(primary->state)->visible)
497 fbc->visible_pipes_mask |= (1 << pipe); 496 fbc->visible_pipes_mask |= (1 << pipe);
498 else 497 else
499 fbc->visible_pipes_mask &= ~(1 << pipe); 498 fbc->visible_pipes_mask &= ~(1 << pipe);
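multiple_pipes_ok() now takes the plane state explicitly instead of digging it out of crtc->base.primary under modeset-lock assertions. A minimal standalone model of the per-pipe visibility bookkeeping it performs (pipe numbers here are hypothetical):

#include <stdint.h>
#include <stdio.h>

static uint32_t visible_pipes_mask;

static void update_pipe_visibility(int pipe, int visible)
{
	if (visible)
		visible_pipes_mask |= 1u << pipe;
	else
		visible_pipes_mask &= ~(1u << pipe);
}

int main(void)
{
	update_pipe_visibility(0, 1);	/* primary plane visible on pipe A */
	update_pipe_visibility(1, 1);	/* ...and on pipe B */
	update_pipe_visibility(1, 0);	/* pipe B's plane turned off again */

	/* Exactly one bit set: only one pipe is showing a primary plane. */
	printf("mask = 0x%x\n", visible_pipes_mask);	/* 0x1 */
	return 0;
}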
@@ -708,21 +707,16 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
708 return effective_w <= max_w && effective_h <= max_h; 707 return effective_w <= max_w && effective_h <= max_h;
709} 708}
710 709
711static void intel_fbc_update_state_cache(struct intel_crtc *crtc) 710static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
711 struct intel_crtc_state *crtc_state,
712 struct intel_plane_state *plane_state)
712{ 713{
713 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 714 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
714 struct intel_fbc *fbc = &dev_priv->fbc; 715 struct intel_fbc *fbc = &dev_priv->fbc;
715 struct intel_fbc_state_cache *cache = &fbc->state_cache; 716 struct intel_fbc_state_cache *cache = &fbc->state_cache;
716 struct intel_crtc_state *crtc_state =
717 to_intel_crtc_state(crtc->base.state);
718 struct intel_plane_state *plane_state =
719 to_intel_plane_state(crtc->base.primary->state);
720 struct drm_framebuffer *fb = plane_state->base.fb; 717 struct drm_framebuffer *fb = plane_state->base.fb;
721 struct drm_i915_gem_object *obj; 718 struct drm_i915_gem_object *obj;
722 719
723 WARN_ON(!drm_modeset_is_locked(&crtc->base.mutex));
724 WARN_ON(!drm_modeset_is_locked(&crtc->base.primary->mutex));
725
726 cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags; 720 cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
727 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 721 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
728 cache->crtc.hsw_bdw_pixel_rate = 722 cache->crtc.hsw_bdw_pixel_rate =
@@ -740,7 +734,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc)
740 734
741 /* FIXME: We lack the proper locking here, so only run this on the 735 /* FIXME: We lack the proper locking here, so only run this on the
742 * platforms that need it. */ 736 * platforms that need it. */
743 if (INTEL_INFO(dev_priv)->gen >= 5 && INTEL_INFO(dev_priv)->gen < 7) 737 if (IS_GEN(dev_priv, 5, 6))
744 cache->fb.ilk_ggtt_offset = i915_gem_obj_ggtt_offset(obj); 738 cache->fb.ilk_ggtt_offset = i915_gem_obj_ggtt_offset(obj);
745 cache->fb.pixel_format = fb->pixel_format; 739 cache->fb.pixel_format = fb->pixel_format;
746 cache->fb.stride = fb->pitches[0]; 740 cache->fb.stride = fb->pitches[0];
@@ -824,10 +818,9 @@ static bool intel_fbc_can_choose(struct intel_crtc *crtc)
824{ 818{
825 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 819 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
826 struct intel_fbc *fbc = &dev_priv->fbc; 820 struct intel_fbc *fbc = &dev_priv->fbc;
827 bool enable_by_default = IS_HASWELL(dev_priv) || 821 bool enable_by_default = IS_BROADWELL(dev_priv);
828 IS_BROADWELL(dev_priv);
829 822
830 if (intel_vgpu_active(dev_priv->dev)) { 823 if (intel_vgpu_active(dev_priv)) {
831 fbc->no_fbc_reason = "VGPU is active"; 824 fbc->no_fbc_reason = "VGPU is active";
832 return false; 825 return false;
833 } 826 }
@@ -887,7 +880,9 @@ static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,
887 return memcmp(params1, params2, sizeof(*params1)) == 0; 880 return memcmp(params1, params2, sizeof(*params1)) == 0;
888} 881}
889 882
890void intel_fbc_pre_update(struct intel_crtc *crtc) 883void intel_fbc_pre_update(struct intel_crtc *crtc,
884 struct intel_crtc_state *crtc_state,
885 struct intel_plane_state *plane_state)
891{ 886{
892 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 887 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
893 struct intel_fbc *fbc = &dev_priv->fbc; 888 struct intel_fbc *fbc = &dev_priv->fbc;
@@ -897,7 +892,7 @@ void intel_fbc_pre_update(struct intel_crtc *crtc)
897 892
898 mutex_lock(&fbc->lock); 893 mutex_lock(&fbc->lock);
899 894
900 if (!multiple_pipes_ok(crtc)) { 895 if (!multiple_pipes_ok(crtc, plane_state)) {
901 fbc->no_fbc_reason = "more than one pipe active"; 896 fbc->no_fbc_reason = "more than one pipe active";
902 goto deactivate; 897 goto deactivate;
903 } 898 }
@@ -905,7 +900,7 @@ void intel_fbc_pre_update(struct intel_crtc *crtc)
905 if (!fbc->enabled || fbc->crtc != crtc) 900 if (!fbc->enabled || fbc->crtc != crtc)
906 goto unlock; 901 goto unlock;
907 902
908 intel_fbc_update_state_cache(crtc); 903 intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
909 904
910deactivate: 905deactivate:
911 intel_fbc_deactivate(dev_priv); 906 intel_fbc_deactivate(dev_priv);
@@ -1089,7 +1084,9 @@ out:
1089 * intel_fbc_enable multiple times for the same pipe without an 1084 * intel_fbc_enable multiple times for the same pipe without an
1090 * intel_fbc_disable in the middle, as long as it is deactivated. 1085 * intel_fbc_disable in the middle, as long as it is deactivated.
1091 */ 1086 */
1092void intel_fbc_enable(struct intel_crtc *crtc) 1087void intel_fbc_enable(struct intel_crtc *crtc,
1088 struct intel_crtc_state *crtc_state,
1089 struct intel_plane_state *plane_state)
1093{ 1090{
1094 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 1091 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1095 struct intel_fbc *fbc = &dev_priv->fbc; 1092 struct intel_fbc *fbc = &dev_priv->fbc;
@@ -1102,19 +1099,19 @@ void intel_fbc_enable(struct intel_crtc *crtc)
1102 if (fbc->enabled) { 1099 if (fbc->enabled) {
1103 WARN_ON(fbc->crtc == NULL); 1100 WARN_ON(fbc->crtc == NULL);
1104 if (fbc->crtc == crtc) { 1101 if (fbc->crtc == crtc) {
1105 WARN_ON(!crtc->config->enable_fbc); 1102 WARN_ON(!crtc_state->enable_fbc);
1106 WARN_ON(fbc->active); 1103 WARN_ON(fbc->active);
1107 } 1104 }
1108 goto out; 1105 goto out;
1109 } 1106 }
1110 1107
1111 if (!crtc->config->enable_fbc) 1108 if (!crtc_state->enable_fbc)
1112 goto out; 1109 goto out;
1113 1110
1114 WARN_ON(fbc->active); 1111 WARN_ON(fbc->active);
1115 WARN_ON(fbc->crtc != NULL); 1112 WARN_ON(fbc->crtc != NULL);
1116 1113
1117 intel_fbc_update_state_cache(crtc); 1114 intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
1118 if (intel_fbc_alloc_cfb(crtc)) { 1115 if (intel_fbc_alloc_cfb(crtc)) {
1119 fbc->no_fbc_reason = "not enough stolen memory"; 1116 fbc->no_fbc_reason = "not enough stolen memory";
1120 goto out; 1117 goto out;
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index ab8d09a81f14..4babefc51eb2 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -150,10 +150,10 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
150 if (size * 2 < ggtt->stolen_usable_size) 150 if (size * 2 < ggtt->stolen_usable_size)
151 obj = i915_gem_object_create_stolen(dev, size); 151 obj = i915_gem_object_create_stolen(dev, size);
152 if (obj == NULL) 152 if (obj == NULL)
153 obj = i915_gem_alloc_object(dev, size); 153 obj = i915_gem_object_create(dev, size);
154 if (!obj) { 154 if (IS_ERR(obj)) {
155 DRM_ERROR("failed to allocate framebuffer\n"); 155 DRM_ERROR("failed to allocate framebuffer\n");
156 ret = -ENOMEM; 156 ret = PTR_ERR(obj);
157 goto out; 157 goto out;
158 } 158 }
159 159
@@ -186,9 +186,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
186 struct i915_ggtt *ggtt = &dev_priv->ggtt; 186 struct i915_ggtt *ggtt = &dev_priv->ggtt;
187 struct fb_info *info; 187 struct fb_info *info;
188 struct drm_framebuffer *fb; 188 struct drm_framebuffer *fb;
189 struct i915_vma *vma;
189 struct drm_i915_gem_object *obj; 190 struct drm_i915_gem_object *obj;
190 int size, ret;
191 bool prealloc = false; 191 bool prealloc = false;
192 void *vaddr;
193 int ret;
192 194
193 if (intel_fb && 195 if (intel_fb &&
194 (sizes->fb_width > intel_fb->base.width || 196 (sizes->fb_width > intel_fb->base.width ||
@@ -214,7 +216,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
214 } 216 }
215 217
216 obj = intel_fb->obj; 218 obj = intel_fb->obj;
217 size = obj->base.size;
218 219
219 mutex_lock(&dev->struct_mutex); 220 mutex_lock(&dev->struct_mutex);
220 221
@@ -244,22 +245,23 @@ static int intelfb_create(struct drm_fb_helper *helper,
244 info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; 245 info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
245 info->fbops = &intelfb_ops; 246 info->fbops = &intelfb_ops;
246 247
248 vma = i915_gem_obj_to_ggtt(obj);
249
247 /* setup aperture base/size for vesafb takeover */ 250 /* setup aperture base/size for vesafb takeover */
248 info->apertures->ranges[0].base = dev->mode_config.fb_base; 251 info->apertures->ranges[0].base = dev->mode_config.fb_base;
249 info->apertures->ranges[0].size = ggtt->mappable_end; 252 info->apertures->ranges[0].size = ggtt->mappable_end;
250 253
251 info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj); 254 info->fix.smem_start = dev->mode_config.fb_base + vma->node.start;
252 info->fix.smem_len = size; 255 info->fix.smem_len = vma->node.size;
253 256
254 info->screen_base = 257 vaddr = i915_vma_pin_iomap(vma);
255 ioremap_wc(ggtt->mappable_base + i915_gem_obj_ggtt_offset(obj), 258 if (IS_ERR(vaddr)) {
256 size);
257 if (!info->screen_base) {
258 DRM_ERROR("Failed to remap framebuffer into virtual memory\n"); 259 DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
259 ret = -ENOSPC; 260 ret = PTR_ERR(vaddr);
260 goto out_destroy_fbi; 261 goto out_destroy_fbi;
261 } 262 }
262 info->screen_size = size; 263 info->screen_base = vaddr;
264 info->screen_size = vma->node.size;
263 265
264 /* This driver doesn't need a VT switch to restore the mode on resume */ 266 /* This driver doesn't need a VT switch to restore the mode on resume */
265 info->skip_vt_switch = true; 267 info->skip_vt_switch = true;
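This hunk replaces a NULL-returning ioremap_wc() with i915_vma_pin_iomap(), which reports failure through the kernel's ERR_PTR convention instead. A minimal user-space model of that convention (the real helpers live in include/linux/err.h):

#include <stdio.h>

#define MAX_ERRNO 4095

/* Small negative errnos are encoded at the very top of the address
 * range, so one return value can carry either a mapping or an error. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *vaddr = ERR_PTR(-12);	/* pretend the pin failed: -ENOMEM */

	if (IS_ERR(vaddr))
		printf("pin_iomap failed: %ld\n", PTR_ERR(vaddr));
	return 0;
}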
@@ -287,7 +289,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
287out_destroy_fbi: 289out_destroy_fbi:
288 drm_fb_helper_release_fbi(helper); 290 drm_fb_helper_release_fbi(helper);
289out_unpin: 291out_unpin:
290 i915_gem_object_ggtt_unpin(obj); 292 intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0));
291out_unlock: 293out_unlock:
292 mutex_unlock(&dev->struct_mutex); 294 mutex_unlock(&dev->struct_mutex);
293 return ret; 295 return ret;
@@ -488,10 +490,10 @@ retry:
488 } 490 }
489 crtcs[i] = new_crtc; 491 crtcs[i] = new_crtc;
490 492
491 DRM_DEBUG_KMS("connector %s on pipe %c [CRTC:%d]: %dx%d%s\n", 493 DRM_DEBUG_KMS("connector %s on [CRTC:%d:%s]: %dx%d%s\n",
492 connector->name, 494 connector->name,
493 pipe_name(to_intel_crtc(connector->state->crtc)->pipe),
494 connector->state->crtc->base.id, 495 connector->state->crtc->base.id,
496 connector->state->crtc->name,
495 modes[i]->hdisplay, modes[i]->vdisplay, 497 modes[i]->hdisplay, modes[i]->vdisplay,
496 modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :""); 498 modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :"");
497 499
@@ -550,7 +552,10 @@ static void intel_fbdev_destroy(struct drm_device *dev,
550 drm_fb_helper_fini(&ifbdev->helper); 552 drm_fb_helper_fini(&ifbdev->helper);
551 553
552 if (ifbdev->fb) { 554 if (ifbdev->fb) {
553 drm_framebuffer_unregister_private(&ifbdev->fb->base); 555 mutex_lock(&dev->struct_mutex);
556 intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0));
557 mutex_unlock(&dev->struct_mutex);
558
554 drm_framebuffer_remove(&ifbdev->fb->base); 559 drm_framebuffer_remove(&ifbdev->fb->base);
555 } 560 }
556} 561}
@@ -717,8 +722,6 @@ int intel_fbdev_init(struct drm_device *dev)
717 return ret; 722 return ret;
718 } 723 }
719 724
720 ifbdev->helper.atomic = true;
721
722 dev_priv->fbdev = ifbdev; 725 dev_priv->fbdev = ifbdev;
723 INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker); 726 INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker);
724 727
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 9d79c4c3e256..4df80cc9a291 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -48,14 +48,23 @@ struct drm_i915_gem_request;
48 * queue (a circular array of work items), again described in the process 48 * queue (a circular array of work items), again described in the process
49 * descriptor. Work queue pages are mapped momentarily as required. 49 * descriptor. Work queue pages are mapped momentarily as required.
50 * 50 *
51 * Finally, we also keep a few statistics here, including the number of 51 * We also keep a few statistics on failures. Ideally, these should all
52 * submissions to each engine, and a record of the last submission failure 52 * be zero!
53 * (if any). 53 * no_wq_space: times that the submission pre-check found no space was
54 * available in the work queue (note, the queue is shared,
55 * not per-engine). It is OK for this to be nonzero, but
56 * it should not be huge!
57 * q_fail: failed to enqueue a work item. This should never happen,
58 * because we check for space beforehand.
59 * b_fail: failed to ring the doorbell. This should never happen, unless
60 * somehow the hardware misbehaves, or maybe if the GuC firmware
61 * crashes? We probably need to reset the GPU to recover.
62 * retcode: errno from last guc_submit()
54 */ 63 */
55struct i915_guc_client { 64struct i915_guc_client {
56 struct drm_i915_gem_object *client_obj; 65 struct drm_i915_gem_object *client_obj;
57 void *client_base; /* first page (only) of above */ 66 void *client_base; /* first page (only) of above */
58 struct intel_context *owner; 67 struct i915_gem_context *owner;
59 struct intel_guc *guc; 68 struct intel_guc *guc;
60 uint32_t priority; 69 uint32_t priority;
61 uint32_t ctx_index; 70 uint32_t ctx_index;
@@ -71,12 +80,13 @@ struct i915_guc_client {
71 uint32_t wq_tail; 80 uint32_t wq_tail;
72 uint32_t unused; /* Was 'wq_head' */ 81 uint32_t unused; /* Was 'wq_head' */
73 82
74 /* GuC submission statistics & status */ 83 uint32_t no_wq_space;
75 uint64_t submissions[GUC_MAX_ENGINES_NUM]; 84 uint32_t q_fail; /* No longer used */
76 uint32_t q_fail;
77 uint32_t b_fail; 85 uint32_t b_fail;
78 int retcode; 86 int retcode;
79 int spare; /* pad to 32 DWords */ 87
88 /* Per-engine counts of GuC submissions */
89 uint64_t submissions[GUC_MAX_ENGINES_NUM];
80}; 90};
81 91
82enum intel_guc_fw_status { 92enum intel_guc_fw_status {
@@ -138,20 +148,19 @@ struct intel_guc {
138}; 148};
139 149
140/* intel_guc_loader.c */ 150/* intel_guc_loader.c */
141extern void intel_guc_ucode_init(struct drm_device *dev); 151extern void intel_guc_init(struct drm_device *dev);
142extern int intel_guc_ucode_load(struct drm_device *dev); 152extern int intel_guc_setup(struct drm_device *dev);
143extern void intel_guc_ucode_fini(struct drm_device *dev); 153extern void intel_guc_fini(struct drm_device *dev);
144extern const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status); 154extern const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status);
145extern int intel_guc_suspend(struct drm_device *dev); 155extern int intel_guc_suspend(struct drm_device *dev);
146extern int intel_guc_resume(struct drm_device *dev); 156extern int intel_guc_resume(struct drm_device *dev);
147 157
148/* i915_guc_submission.c */ 158/* i915_guc_submission.c */
149int i915_guc_submission_init(struct drm_device *dev); 159int i915_guc_submission_init(struct drm_i915_private *dev_priv);
150int i915_guc_submission_enable(struct drm_device *dev); 160int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
151int i915_guc_submit(struct i915_guc_client *client, 161int i915_guc_wq_check_space(struct drm_i915_gem_request *rq);
152 struct drm_i915_gem_request *rq); 162int i915_guc_submit(struct drm_i915_gem_request *rq);
153void i915_guc_submission_disable(struct drm_device *dev); 163void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
154void i915_guc_submission_fini(struct drm_device *dev); 164void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
155int i915_guc_wq_check_space(struct i915_guc_client *client);
156 165
157#endif 166#endif
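The reworked prototypes take the request rather than the client, and the stats documented above imply a two-phase flow: check for work-queue space (bumping no_wq_space on a miss), then submit. A minimal user-space model of the shared circular-queue accounting, assuming a power-of-two ring size (the real queue layout lives in the GuC process descriptor and is not reproduced here):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct wq_model {
	uint32_t head, tail;	/* byte offsets into the ring */
	uint32_t size;		/* ring size, assumed power of two */
	uint32_t no_wq_space;	/* pre-check misses, as documented above */
};

static bool wq_check_space(struct wq_model *wq, uint32_t need)
{
	uint32_t used = (wq->tail - wq->head) & (wq->size - 1);

	if (wq->size - used < need) {
		wq->no_wq_space++;	/* OK if nonzero, bad if huge */
		return false;
	}
	return true;
}

int main(void)
{
	struct wq_model wq = { .head = 0, .tail = 4064, .size = 4096 };

	printf("space for 64 bytes? %d\n", wq_check_space(&wq, 64)); /* 0 */
	printf("misses: %u\n", wq.no_wq_space);			     /* 1 */
	return 0;
}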
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 2de57ffe5e18..944786d7075b 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -71,7 +71,8 @@
71#define WQ_WORKLOAD_TOUCH (2 << WQ_WORKLOAD_SHIFT) 71#define WQ_WORKLOAD_TOUCH (2 << WQ_WORKLOAD_SHIFT)
72 72
73#define WQ_RING_TAIL_SHIFT 20 73#define WQ_RING_TAIL_SHIFT 20
74#define WQ_RING_TAIL_MASK (0x7FF << WQ_RING_TAIL_SHIFT) 74#define WQ_RING_TAIL_MAX 0x7FF /* 2^11 QWords */
75#define WQ_RING_TAIL_MASK (WQ_RING_TAIL_MAX << WQ_RING_TAIL_SHIFT)
75 76
76#define GUC_DOORBELL_ENABLED 1 77#define GUC_DOORBELL_ENABLED 1
77#define GUC_DOORBELL_DISABLED 0 78#define GUC_DOORBELL_DISABLED 0
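Expressing WQ_RING_TAIL_MASK via the new WQ_RING_TAIL_MAX makes the width of the tail field explicit. A standalone sketch of packing a bounded tail into that field (the field position comes from the defines above; the rest of the work-item layout is deliberately not modelled):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define WQ_RING_TAIL_SHIFT	20
#define WQ_RING_TAIL_MAX	0x7FF	/* 2^11 - 1 qwords */
#define WQ_RING_TAIL_MASK	(WQ_RING_TAIL_MAX << WQ_RING_TAIL_SHIFT)

/* Pack an 11-bit ring tail into bits 30:20 of a work-item dword; any
 * wider tail is a caller bug, which the named MAX makes easy to assert. */
static uint32_t pack_ring_tail(uint32_t dword, uint32_t tail)
{
	assert(tail <= WQ_RING_TAIL_MAX);
	return (dword & ~(uint32_t)WQ_RING_TAIL_MASK) |
	       (tail << WQ_RING_TAIL_SHIFT);
}

int main(void)
{
	printf("0x%08x\n", pack_ring_tail(0, 0x7FF));	/* 0x7ff00000 */
	return 0;
}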
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 876e5da44c4e..8fe96a2d989e 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -59,9 +59,12 @@
59 * 59 *
60 */ 60 */
61 61
62#define I915_SKL_GUC_UCODE "i915/skl_guc_ver6.bin" 62#define I915_SKL_GUC_UCODE "i915/skl_guc_ver6_1.bin"
63MODULE_FIRMWARE(I915_SKL_GUC_UCODE); 63MODULE_FIRMWARE(I915_SKL_GUC_UCODE);
64 64
65#define I915_BXT_GUC_UCODE "i915/bxt_guc_ver8_7.bin"
66MODULE_FIRMWARE(I915_BXT_GUC_UCODE);
67
65/* User-friendly representation of an enum */ 68/* User-friendly representation of an enum */
66const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status) 69const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
67{ 70{
@@ -100,6 +103,7 @@ static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
100{ 103{
101 struct intel_engine_cs *engine; 104 struct intel_engine_cs *engine;
102 int irqs; 105 int irqs;
106 u32 tmp;
103 107
104 /* tell all command streamers to forward interrupts and vblank to GuC */ 108 /* tell all command streamers to forward interrupts and vblank to GuC */
105 irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS); 109 irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS);
@@ -114,6 +118,16 @@ static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
114 I915_WRITE(GUC_BCS_RCS_IER, ~irqs); 118 I915_WRITE(GUC_BCS_RCS_IER, ~irqs);
115 I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs); 119 I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs);
116 I915_WRITE(GUC_WD_VECS_IER, ~irqs); 120 I915_WRITE(GUC_WD_VECS_IER, ~irqs);
121
122 /*
 123 * If the GuC has routed PM interrupts to itself, don't override that;
 124 * keep whichever other interrupts the GuC has left unmasked.
125 */
126 tmp = I915_READ(GEN6_PMINTRMSK);
127 if (tmp & GEN8_PMINTR_REDIRECT_TO_NON_DISP) {
128 dev_priv->rps.pm_intr_keep |= ~(tmp & ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
129 dev_priv->rps.pm_intr_keep &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
130 }
117} 131}
118 132
119static u32 get_gttype(struct drm_i915_private *dev_priv) 133static u32 get_gttype(struct drm_i915_private *dev_priv)
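The new GEN6_PMINTRMSK block records which PM interrupts the GuC has claimed so the host never masks them again. A standalone worked example of the mask arithmetic, using an assumed bit position for the redirect flag (in the driver it is one high bit of the register):

#include <stdint.h>
#include <stdio.h>

#define PMINTR_REDIRECT_TO_NON_DISP	(1u << 31)	/* assumed position */

int main(void)
{
	uint32_t pm_intr_keep = 0;
	/* Hypothetical readback: redirect set, bits 0-3 masked off,
	 * everything else left unmasked by the GuC. */
	uint32_t tmp = PMINTR_REDIRECT_TO_NON_DISP | 0x0000000f;

	if (tmp & PMINTR_REDIRECT_TO_NON_DISP) {
		/* Keep every interrupt the GuC left unmasked (0 bits)... */
		pm_intr_keep |= ~(tmp & ~PMINTR_REDIRECT_TO_NON_DISP);
		/* ...but never keep the redirect flag itself. */
		pm_intr_keep &= ~PMINTR_REDIRECT_TO_NON_DISP;
	}

	printf("pm_intr_keep = 0x%08x\n", pm_intr_keep); /* 0x7ffffff0 */
	return 0;
}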
@@ -281,6 +295,17 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
281 return ret; 295 return ret;
282} 296}
283 297
298static u32 guc_wopcm_size(struct drm_i915_private *dev_priv)
299{
300 u32 wopcm_size = GUC_WOPCM_TOP;
301
302 /* On BXT, the top of WOPCM is reserved for RC6 context */
303 if (IS_BROXTON(dev_priv))
304 wopcm_size -= BXT_GUC_WOPCM_RC6_RESERVED;
305
306 return wopcm_size;
307}
308
284/* 309/*
285 * Load the GuC firmware blob into the MinuteIA. 310 * Load the GuC firmware blob into the MinuteIA.
286 */ 311 */
@@ -308,7 +333,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
308 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 333 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
309 334
310 /* init WOPCM */ 335 /* init WOPCM */
311 I915_WRITE(GUC_WOPCM_SIZE, GUC_WOPCM_SIZE_VALUE); 336 I915_WRITE(GUC_WOPCM_SIZE, guc_wopcm_size(dev_priv));
312 I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE); 337 I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE);
313 338
314 /* Enable MIA caching. GuC clock gating is disabled. */ 339 /* Enable MIA caching. GuC clock gating is disabled. */
@@ -372,66 +397,63 @@ static int i915_reset_guc(struct drm_i915_private *dev_priv)
372} 397}
373 398
374/** 399/**
375 * intel_guc_ucode_load() - load GuC uCode into the device 400 * intel_guc_setup() - finish preparing the GuC for activity
376 * @dev: drm device 401 * @dev: drm device
377 * 402 *
378 * Called from gem_init_hw() during driver loading and also after a GPU reset. 403 * Called from gem_init_hw() during driver loading and also after a GPU reset.
379 * 404 *
 405 * The main action required here is to load the GuC uCode into the device.
380 * The firmware image should have already been fetched into memory by the 406 * The firmware image should have already been fetched into memory by the
381 * earlier call to intel_guc_ucode_init(), so here we need only check that 407 * earlier call to intel_guc_init(), so here we need only check that worked,
382 * is succeeded, and then transfer the image to the h/w. 408 * and then transfer the image to the h/w.
383 * 409 *
384 * Return: non-zero code on error 410 * Return: non-zero code on error
385 */ 411 */
386int intel_guc_ucode_load(struct drm_device *dev) 412int intel_guc_setup(struct drm_device *dev)
387{ 413{
388 struct drm_i915_private *dev_priv = dev->dev_private; 414 struct drm_i915_private *dev_priv = dev->dev_private;
389 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; 415 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
390 int retries, err = 0; 416 const char *fw_path = guc_fw->guc_fw_path;
391 417 int retries, ret, err;
392 if (!i915.enable_guc_submission)
393 return 0;
394 418
395 DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n", 419 DRM_DEBUG_DRIVER("GuC fw status: path %s, fetch %s, load %s\n",
420 fw_path,
396 intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status), 421 intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
397 intel_guc_fw_status_repr(guc_fw->guc_fw_load_status)); 422 intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
398 423
399 direct_interrupts_to_host(dev_priv); 424 /* Loading forbidden, or no firmware to load? */
400 425 if (!i915.enable_guc_loading) {
401 if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_NONE) 426 err = 0;
402 return 0;
403
404 if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_SUCCESS &&
405 guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL)
406 return -ENOEXEC;
407
408 guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;
409
410 DRM_DEBUG_DRIVER("GuC fw fetch status %s\n",
411 intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
412
413 switch (guc_fw->guc_fw_fetch_status) {
414 case GUC_FIRMWARE_FAIL:
415 /* something went wrong :( */
416 err = -EIO;
417 goto fail; 427 goto fail;
418 428 } else if (fw_path == NULL) {
419 case GUC_FIRMWARE_NONE: 429 /* Device is known to have no uCode (e.g. no GuC) */
420 case GUC_FIRMWARE_PENDING:
421 default:
422 /* "can't happen" */
423 WARN_ONCE(1, "GuC fw %s invalid guc_fw_fetch_status %s [%d]\n",
424 guc_fw->guc_fw_path,
425 intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
426 guc_fw->guc_fw_fetch_status);
427 err = -ENXIO; 430 err = -ENXIO;
428 goto fail; 431 goto fail;
432 } else if (*fw_path == '\0') {
433 /* Device has a GuC but we don't know what f/w to load? */
434 DRM_INFO("No GuC firmware known for this platform\n");
435 err = -ENODEV;
436 goto fail;
437 }
429 438
430 case GUC_FIRMWARE_SUCCESS: 439 /* Fetch failed, or already fetched but failed to load? */
431 break; 440 if (guc_fw->guc_fw_fetch_status != GUC_FIRMWARE_SUCCESS) {
441 err = -EIO;
442 goto fail;
443 } else if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL) {
444 err = -ENOEXEC;
445 goto fail;
432 } 446 }
433 447
434 err = i915_guc_submission_init(dev); 448 direct_interrupts_to_host(dev_priv);
449
450 guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;
451
452 DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
453 intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
454 intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
455
456 err = i915_guc_submission_init(dev_priv);
435 if (err) 457 if (err)
436 goto fail; 458 goto fail;
437 459
@@ -448,7 +470,7 @@ int intel_guc_ucode_load(struct drm_device *dev)
448 */ 470 */
449 err = i915_reset_guc(dev_priv); 471 err = i915_reset_guc(dev_priv);
450 if (err) { 472 if (err) {
451 DRM_ERROR("GuC reset failed, err %d\n", err); 473 DRM_ERROR("GuC reset failed: %d\n", err);
452 goto fail; 474 goto fail;
453 } 475 }
454 476
@@ -459,8 +481,8 @@ int intel_guc_ucode_load(struct drm_device *dev)
459 if (--retries == 0) 481 if (--retries == 0)
460 goto fail; 482 goto fail;
461 483
462 DRM_INFO("GuC fw load failed, err %d; will reset and " 484 DRM_INFO("GuC fw load failed: %d; will reset and "
463 "retry %d more time(s)\n", err, retries); 485 "retry %d more time(s)\n", err, retries);
464 } 486 }
465 487
466 guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS; 488 guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS;
@@ -470,10 +492,7 @@ int intel_guc_ucode_load(struct drm_device *dev)
470 intel_guc_fw_status_repr(guc_fw->guc_fw_load_status)); 492 intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
471 493
472 if (i915.enable_guc_submission) { 494 if (i915.enable_guc_submission) {
473 /* The execbuf_client will be recreated. Release it first. */ 495 err = i915_guc_submission_enable(dev_priv);
474 i915_guc_submission_disable(dev);
475
476 err = i915_guc_submission_enable(dev);
477 if (err) 496 if (err)
478 goto fail; 497 goto fail;
479 direct_interrupts_to_guc(dev_priv); 498 direct_interrupts_to_guc(dev_priv);
@@ -482,15 +501,50 @@ int intel_guc_ucode_load(struct drm_device *dev)
482 return 0; 501 return 0;
483 502
484fail: 503fail:
485 DRM_ERROR("GuC firmware load failed, err %d\n", err);
486 if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING) 504 if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING)
487 guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL; 505 guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL;
488 506
489 direct_interrupts_to_host(dev_priv); 507 direct_interrupts_to_host(dev_priv);
490 i915_guc_submission_disable(dev); 508 i915_guc_submission_disable(dev_priv);
491 i915_guc_submission_fini(dev); 509 i915_guc_submission_fini(dev_priv);
492 510
493 return err; 511 /*
512 * We've failed to load the firmware :(
513 *
514 * Decide whether to disable GuC submission and fall back to
515 * execlist mode, and whether to hide the error by returning
516 * zero or to return -EIO, which the caller will treat as a
517 * nonfatal error (i.e. it doesn't prevent driver load, but
518 * marks the GPU as wedged until reset).
519 */
520 if (i915.enable_guc_loading > 1) {
521 ret = -EIO;
522 } else if (i915.enable_guc_submission > 1) {
523 ret = -EIO;
524 } else {
525 ret = 0;
526 }
527
528 if (err == 0 && !HAS_GUC_UCODE(dev))
529 ; /* Don't mention the GuC! */
530 else if (err == 0)
531 DRM_INFO("GuC firmware load skipped\n");
532 else if (ret != -EIO)
533 DRM_INFO("GuC firmware load failed: %d\n", err);
534 else
535 DRM_ERROR("GuC firmware load failed: %d\n", err);
536
537 if (i915.enable_guc_submission) {
538 if (fw_path == NULL)
539 DRM_INFO("GuC submission without firmware not supported\n");
540 if (ret == 0)
541 DRM_INFO("Falling back from GuC submission to execlist mode\n");
542 else
543 DRM_ERROR("GuC init failed: %d\n", ret);
544 }
545 i915.enable_guc_submission = 0;
546
547 return ret;
494} 548}
495 549
496static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw) 550static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
@@ -552,9 +606,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
552 606
553 /* Header and uCode will be loaded to WOPCM. Size of the two. */ 607 /* Header and uCode will be loaded to WOPCM. Size of the two. */
554 size = guc_fw->header_size + guc_fw->ucode_size; 608 size = guc_fw->header_size + guc_fw->ucode_size;
555 609 if (size > guc_wopcm_size(dev->dev_private)) {
556 /* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
557 if (size > GUC_WOPCM_SIZE_VALUE - 0x8000) {
558 DRM_ERROR("Firmware is too large to fit in WOPCM\n"); 610 DRM_ERROR("Firmware is too large to fit in WOPCM\n");
559 goto fail; 611 goto fail;
560 } 612 }
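guc_wopcm_size() centralizes the per-platform reservation that the old hard-coded "- 0x8000" obscured. A standalone sketch of the firmware size check, with illustrative numbers (512KB of WOPCM, 64KB reserved for RC6 on BXT); treat the values as assumptions rather than hardware documentation:

#include <stdbool.h>
#include <stdio.h>

#define GUC_WOPCM_TOP			(0x80 << 12)	/* 512KB, assumed */
#define BXT_GUC_WOPCM_RC6_RESERVED	(0x10 << 12)	/* 64KB, assumed */

static unsigned int guc_wopcm_size(bool is_broxton)
{
	unsigned int wopcm_size = GUC_WOPCM_TOP;

	/* On BXT, the top of WOPCM is reserved for the RC6 context */
	if (is_broxton)
		wopcm_size -= BXT_GUC_WOPCM_RC6_RESERVED;

	return wopcm_size;
}

int main(void)
{
	unsigned int header = 0x80, ucode = 0x70000; /* hypothetical blob */

	if (header + ucode > guc_wopcm_size(true))
		printf("Firmware is too large to fit in WOPCM\n");
	return 0;
}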
@@ -617,22 +669,25 @@ fail:
617} 669}
618 670
619/** 671/**
620 * intel_guc_ucode_init() - define parameters and fetch firmware 672 * intel_guc_init() - define parameters and fetch firmware
621 * @dev: drm device 673 * @dev: drm device
622 * 674 *
623 * Called early during driver load, but after GEM is initialised. 675 * Called early during driver load, but after GEM is initialised.
624 * 676 *
625 * The firmware will be transferred to the GuC's memory later, 677 * The firmware will be transferred to the GuC's memory later,
626 * when intel_guc_ucode_load() is called. 678 * when intel_guc_setup() is called.
627 */ 679 */
628void intel_guc_ucode_init(struct drm_device *dev) 680void intel_guc_init(struct drm_device *dev)
629{ 681{
630 struct drm_i915_private *dev_priv = dev->dev_private; 682 struct drm_i915_private *dev_priv = dev->dev_private;
631 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; 683 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
632 const char *fw_path; 684 const char *fw_path;
633 685
634 if (!HAS_GUC_SCHED(dev)) 686 /* A negative value means "use platform default" */
635 i915.enable_guc_submission = false; 687 if (i915.enable_guc_loading < 0)
688 i915.enable_guc_loading = HAS_GUC_UCODE(dev);
689 if (i915.enable_guc_submission < 0)
690 i915.enable_guc_submission = HAS_GUC_SCHED(dev);
636 691
637 if (!HAS_GUC_UCODE(dev)) { 692 if (!HAS_GUC_UCODE(dev)) {
638 fw_path = NULL; 693 fw_path = NULL;
@@ -640,27 +695,26 @@ void intel_guc_ucode_init(struct drm_device *dev)
640 fw_path = I915_SKL_GUC_UCODE; 695 fw_path = I915_SKL_GUC_UCODE;
641 guc_fw->guc_fw_major_wanted = 6; 696 guc_fw->guc_fw_major_wanted = 6;
642 guc_fw->guc_fw_minor_wanted = 1; 697 guc_fw->guc_fw_minor_wanted = 1;
698 } else if (IS_BROXTON(dev)) {
699 fw_path = I915_BXT_GUC_UCODE;
700 guc_fw->guc_fw_major_wanted = 8;
701 guc_fw->guc_fw_minor_wanted = 7;
643 } else { 702 } else {
644 i915.enable_guc_submission = false;
645 fw_path = ""; /* unknown device */ 703 fw_path = ""; /* unknown device */
646 } 704 }
647 705
648 if (!i915.enable_guc_submission)
649 return;
650
651 guc_fw->guc_dev = dev; 706 guc_fw->guc_dev = dev;
652 guc_fw->guc_fw_path = fw_path; 707 guc_fw->guc_fw_path = fw_path;
653 guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE; 708 guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
654 guc_fw->guc_fw_load_status = GUC_FIRMWARE_NONE; 709 guc_fw->guc_fw_load_status = GUC_FIRMWARE_NONE;
655 710
711 /* Early (and silent) return if GuC loading is disabled */
712 if (!i915.enable_guc_loading)
713 return;
656 if (fw_path == NULL) 714 if (fw_path == NULL)
657 return; 715 return;
658 716 if (*fw_path == '\0')
659 if (*fw_path == '\0') {
660 DRM_ERROR("No GuC firmware known for this platform\n");
661 guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
662 return; 717 return;
663 }
664 718
665 guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_PENDING; 719 guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_PENDING;
666 DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path); 720 DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path);
@@ -669,18 +723,18 @@ void intel_guc_ucode_init(struct drm_device *dev)
669} 723}
670 724
671/** 725/**
672 * intel_guc_ucode_fini() - clean up all allocated resources 726 * intel_guc_fini() - clean up all allocated resources
673 * @dev: drm device 727 * @dev: drm device
674 */ 728 */
675void intel_guc_ucode_fini(struct drm_device *dev) 729void intel_guc_fini(struct drm_device *dev)
676{ 730{
677 struct drm_i915_private *dev_priv = dev->dev_private; 731 struct drm_i915_private *dev_priv = dev->dev_private;
678 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; 732 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
679 733
680 mutex_lock(&dev->struct_mutex); 734 mutex_lock(&dev->struct_mutex);
681 direct_interrupts_to_host(dev_priv); 735 direct_interrupts_to_host(dev_priv);
682 i915_guc_submission_disable(dev); 736 i915_guc_submission_disable(dev_priv);
683 i915_guc_submission_fini(dev); 737 i915_guc_submission_fini(dev_priv);
684 738
685 if (guc_fw->guc_fw_obj) 739 if (guc_fw->guc_fw_obj)
686 drm_gem_object_unreference(&guc_fw->guc_fw_obj->base); 740 drm_gem_object_unreference(&guc_fw->guc_fw_obj->base);
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
new file mode 100644
index 000000000000..9fa458ce40a6
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -0,0 +1,100 @@
1/*
2 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24#include "i915_drv.h"
25#include "intel_gvt.h"
26
27/**
28 * DOC: Intel GVT-g host support
29 *
30 * Intel GVT-g is a graphics virtualization technology which shares the
31 * GPU among multiple virtual machines on a time-sharing basis. Each
 32 * virtual machine is presented with a virtual GPU (vGPU) that has
 33 * features equivalent to the underlying physical GPU (pGPU), so the i915
 34 * driver can run seamlessly in a virtual machine. This file provides the
 35 * GVT enlightenments and the necessary components GVT uses in i915.
36 */
37
38static bool is_supported_device(struct drm_i915_private *dev_priv)
39{
40 if (IS_BROADWELL(dev_priv))
41 return true;
42 return false;
43}
44
45/**
46 * intel_gvt_init - initialize GVT components
47 * @dev_priv: drm i915 private data
48 *
49 * This function is called at the initialization stage to create a GVT device.
50 *
51 * Returns:
52 * Zero on success, negative error code if failed.
53 *
54 */
55int intel_gvt_init(struct drm_i915_private *dev_priv)
56{
57 int ret;
58
59 if (!i915.enable_gvt) {
60 DRM_DEBUG_DRIVER("GVT-g is disabled by kernel params\n");
61 return 0;
62 }
63
64 if (!is_supported_device(dev_priv)) {
65 DRM_DEBUG_DRIVER("Unsupported device. GVT-g is disabled\n");
66 return 0;
67 }
68
69 /*
 70 * If we're not running as host, or we fail to find an MPT module, disable GVT-g
71 */
72 ret = intel_gvt_init_host();
73 if (ret) {
74 DRM_DEBUG_DRIVER("Not in host or MPT modules not found\n");
75 return 0;
76 }
77
78 ret = intel_gvt_init_device(dev_priv);
79 if (ret) {
80 DRM_DEBUG_DRIVER("Fail to init GVT device\n");
81 return 0;
82 }
83
84 return 0;
85}
86
87/**
88 * intel_gvt_cleanup - cleanup GVT components when i915 driver is unloading
89 * @dev_priv: drm i915 private *
90 *
91 * This function is called at the i915 driver unloading stage, to shutdown
92 * GVT components and release the related resources.
93 */
94void intel_gvt_cleanup(struct drm_i915_private *dev_priv)
95{
96 if (!intel_gvt_active(dev_priv))
97 return;
98
99 intel_gvt_clean_device(dev_priv);
100}
diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h
new file mode 100644
index 000000000000..960211df74db
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_gvt.h
@@ -0,0 +1,45 @@
1/*
2 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24#ifndef _INTEL_GVT_H_
25#define _INTEL_GVT_H_
26
27#include "gvt/gvt.h"
28
29#ifdef CONFIG_DRM_I915_GVT
30int intel_gvt_init(struct drm_i915_private *dev_priv);
31void intel_gvt_cleanup(struct drm_i915_private *dev_priv);
32int intel_gvt_init_device(struct drm_i915_private *dev_priv);
33void intel_gvt_clean_device(struct drm_i915_private *dev_priv);
34int intel_gvt_init_host(void);
35#else
36static inline int intel_gvt_init(struct drm_i915_private *dev_priv)
37{
38 return 0;
39}
40static inline void intel_gvt_cleanup(struct drm_i915_private *dev_priv)
41{
42}
43#endif
44
45#endif /* _INTEL_GVT_H_ */
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 2c3bd9c2573e..fb21626ada64 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1678,35 +1678,12 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
1678 struct intel_crtc *intel_crtc = 1678 struct intel_crtc *intel_crtc =
1679 to_intel_crtc(encoder->base.crtc); 1679 to_intel_crtc(encoder->base.crtc);
1680 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; 1680 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
1681 enum dpio_channel port = vlv_dport_to_channel(dport);
1682 int pipe = intel_crtc->pipe;
1683 u32 val;
1684 1681
1685 /* Enable clock channels for this port */ 1682 vlv_phy_pre_encoder_enable(encoder);
1686 mutex_lock(&dev_priv->sb_lock);
1687 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
1688 val = 0;
1689 if (pipe)
1690 val |= (1<<21);
1691 else
1692 val &= ~(1<<21);
1693 val |= 0x001000c4;
1694 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
1695 1683
1696 /* HDMI 1.0V-2dB */ 1684 /* HDMI 1.0V-2dB */
1697 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0); 1685 vlv_set_phy_signal_level(encoder, 0x2b245f5f, 0x00002000, 0x5578b83a,
1698 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), 0x2b245f5f); 1686 0x2b247878);
1699 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port), 0x5578b83a);
1700 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0c782040);
1701 vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), 0x2b247878);
1702 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
1703 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000);
1704 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
1705
1706 /* Program lane clock */
1707 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
1708 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
1709 mutex_unlock(&dev_priv->sb_lock);
1710 1687
1711 intel_hdmi->set_infoframes(&encoder->base, 1688 intel_hdmi->set_infoframes(&encoder->base,
1712 intel_crtc->config->has_hdmi_sink, 1689 intel_crtc->config->has_hdmi_sink,
@@ -1719,207 +1696,27 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
1719 1696
1720static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder) 1697static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
1721{ 1698{
1722 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1723 struct drm_device *dev = encoder->base.dev;
1724 struct drm_i915_private *dev_priv = dev->dev_private;
1725 struct intel_crtc *intel_crtc =
1726 to_intel_crtc(encoder->base.crtc);
1727 enum dpio_channel port = vlv_dport_to_channel(dport);
1728 int pipe = intel_crtc->pipe;
1729
1730 intel_hdmi_prepare(encoder); 1699 intel_hdmi_prepare(encoder);
1731 1700
1732 /* Program Tx lane resets to default */ 1701 vlv_phy_pre_pll_enable(encoder);
1733 mutex_lock(&dev_priv->sb_lock);
1734 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
1735 DPIO_PCS_TX_LANE2_RESET |
1736 DPIO_PCS_TX_LANE1_RESET);
1737 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
1738 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
1739 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
1740 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
1741 DPIO_PCS_CLK_SOFT_RESET);
1742
1743 /* Fix up inter-pair skew failure */
1744 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
1745 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
1746 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
1747
1748 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000);
1749 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
1750 mutex_unlock(&dev_priv->sb_lock);
1751}
1752
1753static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
1754 bool reset)
1755{
1756 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1757 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
1758 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1759 enum pipe pipe = crtc->pipe;
1760 uint32_t val;
1761
1762 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
1763 if (reset)
1764 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
1765 else
1766 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
1767 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
1768
1769 if (crtc->config->lane_count > 2) {
1770 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
1771 if (reset)
1772 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
1773 else
1774 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
1775 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
1776 }
1777
1778 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
1779 val |= CHV_PCS_REQ_SOFTRESET_EN;
1780 if (reset)
1781 val &= ~DPIO_PCS_CLK_SOFT_RESET;
1782 else
1783 val |= DPIO_PCS_CLK_SOFT_RESET;
1784 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
1785
1786 if (crtc->config->lane_count > 2) {
1787 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
1788 val |= CHV_PCS_REQ_SOFTRESET_EN;
1789 if (reset)
1790 val &= ~DPIO_PCS_CLK_SOFT_RESET;
1791 else
1792 val |= DPIO_PCS_CLK_SOFT_RESET;
1793 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
1794 }
1795} 1702}
1796 1703
1797static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder) 1704static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
1798{ 1705{
1799 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1800 struct drm_device *dev = encoder->base.dev;
1801 struct drm_i915_private *dev_priv = dev->dev_private;
1802 struct intel_crtc *intel_crtc =
1803 to_intel_crtc(encoder->base.crtc);
1804 enum dpio_channel ch = vlv_dport_to_channel(dport);
1805 enum pipe pipe = intel_crtc->pipe;
1806 u32 val;
1807
1808 intel_hdmi_prepare(encoder); 1706 intel_hdmi_prepare(encoder);
1809 1707
1810 /* 1708 chv_phy_pre_pll_enable(encoder);
1811 * Must trick the second common lane into life.
1812 * Otherwise we can't even access the PLL.
1813 */
1814 if (ch == DPIO_CH0 && pipe == PIPE_B)
1815 dport->release_cl2_override =
1816 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
1817
1818 chv_phy_powergate_lanes(encoder, true, 0x0);
1819
1820 mutex_lock(&dev_priv->sb_lock);
1821
1822 /* Assert data lane reset */
1823 chv_data_lane_soft_reset(encoder, true);
1824
1825 /* program left/right clock distribution */
1826 if (pipe != PIPE_B) {
1827 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
1828 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
1829 if (ch == DPIO_CH0)
1830 val |= CHV_BUFLEFTENA1_FORCE;
1831 if (ch == DPIO_CH1)
1832 val |= CHV_BUFRIGHTENA1_FORCE;
1833 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
1834 } else {
1835 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
1836 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
1837 if (ch == DPIO_CH0)
1838 val |= CHV_BUFLEFTENA2_FORCE;
1839 if (ch == DPIO_CH1)
1840 val |= CHV_BUFRIGHTENA2_FORCE;
1841 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
1842 }
1843
1844 /* program clock channel usage */
1845 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
1846 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
1847 if (pipe != PIPE_B)
1848 val &= ~CHV_PCS_USEDCLKCHANNEL;
1849 else
1850 val |= CHV_PCS_USEDCLKCHANNEL;
1851 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
1852
1853 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
1854 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
1855 if (pipe != PIPE_B)
1856 val &= ~CHV_PCS_USEDCLKCHANNEL;
1857 else
1858 val |= CHV_PCS_USEDCLKCHANNEL;
1859 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
1860
1861 /*
1862 * This is a bit weird since generally CL
1863 * matches the pipe, but here we need to
1864 * pick the CL based on the port.
1865 */
1866 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
1867 if (pipe != PIPE_B)
1868 val &= ~CHV_CMN_USEDCLKCHANNEL;
1869 else
1870 val |= CHV_CMN_USEDCLKCHANNEL;
1871 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
1872
1873 mutex_unlock(&dev_priv->sb_lock);
1874} 1709}
1875 1710
1876static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder) 1711static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder)
1877{ 1712{
1878 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 1713 chv_phy_post_pll_disable(encoder);
1879 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
1880 u32 val;
1881
1882 mutex_lock(&dev_priv->sb_lock);
1883
1884 /* disable left/right clock distribution */
1885 if (pipe != PIPE_B) {
1886 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
1887 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
1888 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
1889 } else {
1890 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
1891 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
1892 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
1893 }
1894
1895 mutex_unlock(&dev_priv->sb_lock);
1896
1897 /*
1898 * Leave the power down bit cleared for at least one
1899 * lane so that chv_powergate_phy_ch() will power
1900 * on something when the channel is otherwise unused.
1901 * When the port is off and the override is removed
1902 * the lanes power down anyway, so otherwise it doesn't
1903 * really matter what the state of power down bits is
1904 * after this.
1905 */
1906 chv_phy_powergate_lanes(encoder, false, 0x0);
1907} 1714}
1908 1715
1909static void vlv_hdmi_post_disable(struct intel_encoder *encoder) 1716static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
1910{ 1717{
1911 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1912 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
1913 struct intel_crtc *intel_crtc =
1914 to_intel_crtc(encoder->base.crtc);
1915 enum dpio_channel port = vlv_dport_to_channel(dport);
1916 int pipe = intel_crtc->pipe;
1917
1918 /* Reset lanes to avoid HDMI flicker (VLV w/a) */ 1718 /* Reset lanes to avoid HDMI flicker (VLV w/a) */
1919 mutex_lock(&dev_priv->sb_lock); 1719 vlv_phy_reset_lanes(encoder);
1920 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
1921 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
1922 mutex_unlock(&dev_priv->sb_lock);
1923} 1720}
1924 1721
1925static void chv_hdmi_post_disable(struct intel_encoder *encoder) 1722static void chv_hdmi_post_disable(struct intel_encoder *encoder)
@@ -1944,138 +1741,12 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
1944 struct intel_crtc *intel_crtc = 1741 struct intel_crtc *intel_crtc =
1945 to_intel_crtc(encoder->base.crtc); 1742 to_intel_crtc(encoder->base.crtc);
1946 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; 1743 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
1947 enum dpio_channel ch = vlv_dport_to_channel(dport);
1948 int pipe = intel_crtc->pipe;
1949 int data, i, stagger;
1950 u32 val;
1951 1744
1952 mutex_lock(&dev_priv->sb_lock); 1745 chv_phy_pre_encoder_enable(encoder);
1953
1954 /* allow hardware to manage TX FIFO reset source */
1955 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
1956 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
1957 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
1958
1959 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
1960 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
1961 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
1962
1963 /* Program Tx latency optimal setting */
1964 for (i = 0; i < 4; i++) {
1965 /* Set the upar bit */
1966 data = (i == 1) ? 0x0 : 0x1;
1967 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
1968 data << DPIO_UPAR_SHIFT);
1969 }
1970
1971 /* Data lane stagger programming */
1972 if (intel_crtc->config->port_clock > 270000)
1973 stagger = 0x18;
1974 else if (intel_crtc->config->port_clock > 135000)
1975 stagger = 0xd;
1976 else if (intel_crtc->config->port_clock > 67500)
1977 stagger = 0x7;
1978 else if (intel_crtc->config->port_clock > 33750)
1979 stagger = 0x4;
1980 else
1981 stagger = 0x2;
1982
1983 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
1984 val |= DPIO_TX2_STAGGER_MASK(0x1f);
1985 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
1986
1987 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
1988 val |= DPIO_TX2_STAGGER_MASK(0x1f);
1989 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
1990
1991 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
1992 DPIO_LANESTAGGER_STRAP(stagger) |
1993 DPIO_LANESTAGGER_STRAP_OVRD |
1994 DPIO_TX1_STAGGER_MASK(0x1f) |
1995 DPIO_TX1_STAGGER_MULT(6) |
1996 DPIO_TX2_STAGGER_MULT(0));
1997
1998 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
1999 DPIO_LANESTAGGER_STRAP(stagger) |
2000 DPIO_LANESTAGGER_STRAP_OVRD |
2001 DPIO_TX1_STAGGER_MASK(0x1f) |
2002 DPIO_TX1_STAGGER_MULT(7) |
2003 DPIO_TX2_STAGGER_MULT(5));
2004
2005 /* Deassert data lane reset */
2006 chv_data_lane_soft_reset(encoder, false);
2007
2008 /* Clear calc init */
2009 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
2010 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
2011 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
2012 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
2013 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
2014
2015 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
2016 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
2017 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
2018 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
2019 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
2020
2021 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
2022 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
2023 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
2024 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
2025
2026 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
2027 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
2028 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
2029 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
2030 1746
2031 /* FIXME: Program the support xxx V-dB */ 1747 /* FIXME: Program the support xxx V-dB */
2032 /* Use 800mV-0dB */ 1748 /* Use 800mV-0dB */
2033 for (i = 0; i < 4; i++) { 1749 chv_set_phy_signal_level(encoder, 128, 102, false);
2034 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
2035 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
2036 val |= 128 << DPIO_SWING_DEEMPH9P5_SHIFT;
2037 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
2038 }
2039
2040 for (i = 0; i < 4; i++) {
2041 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
2042
2043 val &= ~DPIO_SWING_MARGIN000_MASK;
2044 val |= 102 << DPIO_SWING_MARGIN000_SHIFT;
2045
2046 /*
2047 * Supposedly this value shouldn't matter when unique transition
2048 * scale is disabled, but in fact it does matter. Let's just
2049 * always program the same value and hope it's OK.
2050 */
2051 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
2052 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
2053
2054 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
2055 }
2056
2057 /*
2058 * The document said it needs to set bit 27 for ch0 and bit 26
2059 * for ch1. Might be a typo in the doc.
2060 * For now, for this unique transition scale selection, set bit
2061 * 27 for ch0 and ch1.
2062 */
2063 for (i = 0; i < 4; i++) {
2064 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
2065 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
2066 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
2067 }
2068
2069 /* Start swing calculation */
2070 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
2071 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
2072 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
2073
2074 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
2075 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
2076 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
2077
2078 mutex_unlock(&dev_priv->sb_lock);
2079 1750
2080 intel_hdmi->set_infoframes(&encoder->base, 1751 intel_hdmi->set_infoframes(&encoder->base,
2081 intel_crtc->config->has_hdmi_sink, 1752 intel_crtc->config->has_hdmi_sink,
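
[The stagger programming dropped from the hunk above (now done inside chv_phy_pre_encoder_enable()) selected its value purely from the port clock. A minimal userspace sketch of that bucketing, assuming the kHz unit port_clock uses and keeping the thresholds from the removed code:]

#include <stdio.h>

/* Port clock is in kHz, as in intel_crtc->config->port_clock. */
static unsigned int chv_stagger_for_clock(int port_clock)
{
        if (port_clock > 270000)
                return 0x18;
        else if (port_clock > 135000)
                return 0xd;
        else if (port_clock > 67500)
                return 0x7;
        else if (port_clock > 33750)
                return 0x4;
        else
                return 0x2;
}

int main(void)
{
        /* HDMI 1.4 at 297 MHz lands in the top bucket. */
        printf("stagger = %#x\n", chv_stagger_for_clock(297000));
        return 0;
}
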
@@ -2086,10 +1757,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
2086 vlv_wait_port_ready(dev_priv, dport, 0x0); 1757 vlv_wait_port_ready(dev_priv, dport, 0x0);
2087 1758
2088 /* Second common lane will stay alive on its own now */ 1759 /* Second common lane will stay alive on its own now */
2089 if (dport->release_cl2_override) { 1760 chv_phy_release_cl2_override(encoder);
2090 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
2091 dport->release_cl2_override = false;
2092 }
2093} 1761}
2094 1762
2095static void intel_hdmi_destroy(struct drm_connector *connector) 1763static void intel_hdmi_destroy(struct drm_connector *connector)
@@ -2106,6 +1774,7 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
2106 .fill_modes = drm_helper_probe_single_connector_modes, 1774 .fill_modes = drm_helper_probe_single_connector_modes,
2107 .set_property = intel_hdmi_set_property, 1775 .set_property = intel_hdmi_set_property,
2108 .atomic_get_property = intel_connector_atomic_get_property, 1776 .atomic_get_property = intel_connector_atomic_get_property,
1777 .early_unregister = intel_connector_unregister,
2109 .destroy = intel_hdmi_destroy, 1778 .destroy = intel_hdmi_destroy,
2110 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 1779 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
2111 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 1780 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -2114,7 +1783,6 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
2114static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = { 1783static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
2115 .get_modes = intel_hdmi_get_modes, 1784 .get_modes = intel_hdmi_get_modes,
2116 .mode_valid = intel_hdmi_mode_valid, 1785 .mode_valid = intel_hdmi_mode_valid,
2117 .best_encoder = intel_best_encoder,
2118}; 1786};
2119 1787
2120static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { 1788static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
@@ -2142,6 +1810,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
2142 enum port port = intel_dig_port->port; 1810 enum port port = intel_dig_port->port;
2143 uint8_t alternate_ddc_pin; 1811 uint8_t alternate_ddc_pin;
2144 1812
1813 DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
1814 port_name(port));
1815
2145 if (WARN(intel_dig_port->max_lanes < 4, 1816 if (WARN(intel_dig_port->max_lanes < 4,
2146 "Not enough lanes (%d) for HDMI on port %c\n", 1817 "Not enough lanes (%d) for HDMI on port %c\n",
2147 intel_dig_port->max_lanes, port_name(port))) 1818 intel_dig_port->max_lanes, port_name(port)))
@@ -2239,7 +1910,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
2239 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; 1910 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
2240 else 1911 else
2241 intel_connector->get_hw_state = intel_connector_get_hw_state; 1912 intel_connector->get_hw_state = intel_connector_get_hw_state;
2242 intel_connector->unregister = intel_connector_unregister;
2243 1913
2244 intel_hdmi_add_properties(intel_hdmi, connector); 1914 intel_hdmi_add_properties(intel_hdmi, connector);
2245 1915
@@ -2277,7 +1947,7 @@ void intel_hdmi_init(struct drm_device *dev,
2277 intel_encoder = &intel_dig_port->base; 1947 intel_encoder = &intel_dig_port->base;
2278 1948
2279 drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs, 1949 drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
2280 DRM_MODE_ENCODER_TMDS, NULL); 1950 DRM_MODE_ENCODER_TMDS, "HDMI %c", port_name(port));
2281 1951
2282 intel_encoder->compute_config = intel_hdmi_compute_config; 1952 intel_encoder->compute_config = intel_hdmi_compute_config;
2283 if (HAS_PCH_SPLIT(dev)) { 1953 if (HAS_PCH_SPLIT(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index bee673005d48..38eeca7a6e72 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -220,7 +220,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
220 } 220 }
221 } 221 }
222 if (dev_priv->display.hpd_irq_setup) 222 if (dev_priv->display.hpd_irq_setup)
223 dev_priv->display.hpd_irq_setup(dev); 223 dev_priv->display.hpd_irq_setup(dev_priv);
224 spin_unlock_irq(&dev_priv->irq_lock); 224 spin_unlock_irq(&dev_priv->irq_lock);
225 225
226 intel_runtime_pm_put(dev_priv); 226 intel_runtime_pm_put(dev_priv);
@@ -346,7 +346,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
346 346
347/** 347/**
348 * intel_hpd_irq_handler - main hotplug irq handler 348 * intel_hpd_irq_handler - main hotplug irq handler
349 * @dev: drm device 349 * @dev_priv: drm_i915_private
350 * @pin_mask: a mask of hpd pins that have triggered the irq 350 * @pin_mask: a mask of hpd pins that have triggered the irq
351 * @long_mask: a mask of hpd pins that may be long hpd pulses 351 * @long_mask: a mask of hpd pins that may be long hpd pulses
352 * 352 *
@@ -360,10 +360,9 @@ static void i915_hotplug_work_func(struct work_struct *work)
360 * Here, we do hotplug irq storm detection and mitigation, and pass further 360 * Here, we do hotplug irq storm detection and mitigation, and pass further
361 * processing to appropriate bottom halves. 361 * processing to appropriate bottom halves.
362 */ 362 */
363void intel_hpd_irq_handler(struct drm_device *dev, 363void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
364 u32 pin_mask, u32 long_mask) 364 u32 pin_mask, u32 long_mask)
365{ 365{
366 struct drm_i915_private *dev_priv = dev->dev_private;
367 int i; 366 int i;
368 enum port port; 367 enum port port;
369 bool storm_detected = false; 368 bool storm_detected = false;
@@ -407,7 +406,7 @@ void intel_hpd_irq_handler(struct drm_device *dev,
407 * hotplug bits itself. So only WARN about unexpected 406 * hotplug bits itself. So only WARN about unexpected
408 * interrupts on saner platforms. 407 * interrupts on saner platforms.
409 */ 408 */
410 WARN_ONCE(!HAS_GMCH_DISPLAY(dev), 409 WARN_ONCE(!HAS_GMCH_DISPLAY(dev_priv),
411 "Received HPD interrupt on pin %d although disabled\n", i); 410 "Received HPD interrupt on pin %d although disabled\n", i);
412 continue; 411 continue;
413 } 412 }
@@ -427,7 +426,7 @@ void intel_hpd_irq_handler(struct drm_device *dev,
427 } 426 }
428 427
429 if (storm_detected) 428 if (storm_detected)
430 dev_priv->display.hpd_irq_setup(dev); 429 dev_priv->display.hpd_irq_setup(dev_priv);
431 spin_unlock(&dev_priv->irq_lock); 430 spin_unlock(&dev_priv->irq_lock);
432 431
433 /* 432 /*
@@ -485,7 +484,7 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
485 */ 484 */
486 spin_lock_irq(&dev_priv->irq_lock); 485 spin_lock_irq(&dev_priv->irq_lock);
487 if (dev_priv->display.hpd_irq_setup) 486 if (dev_priv->display.hpd_irq_setup)
488 dev_priv->display.hpd_irq_setup(dev); 487 dev_priv->display.hpd_irq_setup(dev_priv);
489 spin_unlock_irq(&dev_priv->irq_lock); 488 spin_unlock_irq(&dev_priv->irq_lock);
490} 489}
491 490
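
[The storm detection and mitigation mentioned in the handler's kernel-doc follows a count-within-a-window scheme: interrupts on a pin are counted inside a short period, and the pin is marked stormy once a threshold is crossed so it can be masked and later re-enabled from intel_hpd_irq_storm_reenable_work(). A hedged stand-alone sketch of the idea; field names are illustrative and threshold/window are parameters, not the driver's constants:]

#include <stdbool.h>

struct hpd_pin_state {
        unsigned long window_start;     /* jiffies-like timestamp */
        unsigned int count;
        bool stormy;
};

static bool hpd_storm_detect(struct hpd_pin_state *pin,
                             unsigned long now,
                             unsigned long window,
                             unsigned int threshold)
{
        if (now - pin->window_start > window) {
                /* Window expired: restart counting from here. */
                pin->window_start = now;
                pin->count = 0;
        } else if (++pin->count > threshold) {
                /* Too many pulses inside one window: flag the storm. */
                pin->stormy = true;
        }
        return pin->stormy;
}
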
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 42eac37de047..debed011a958 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -208,31 +208,27 @@
208} while (0) 208} while (0)
209 209
210enum { 210enum {
211 ADVANCED_CONTEXT = 0,
212 LEGACY_32B_CONTEXT,
213 ADVANCED_AD_CONTEXT,
214 LEGACY_64B_CONTEXT
215};
216#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
217#define GEN8_CTX_ADDRESSING_MODE(dev) (USES_FULL_48BIT_PPGTT(dev) ?\
218 LEGACY_64B_CONTEXT :\
219 LEGACY_32B_CONTEXT)
220enum {
221 FAULT_AND_HANG = 0, 211 FAULT_AND_HANG = 0,
222 FAULT_AND_HALT, /* Debug only */ 212 FAULT_AND_HALT, /* Debug only */
223 FAULT_AND_STREAM, 213 FAULT_AND_STREAM,
224 FAULT_AND_CONTINUE /* Unsupported */ 214 FAULT_AND_CONTINUE /* Unsupported */
225}; 215};
226#define GEN8_CTX_ID_SHIFT 32 216#define GEN8_CTX_ID_SHIFT 32
217#define GEN8_CTX_ID_WIDTH 21
227#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17 218#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
228#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26 219#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26
229 220
230static int intel_lr_context_pin(struct intel_context *ctx, 221/* Typical size of the average request (2 pipecontrols and a MI_BB) */
222#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
223
224static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
225 struct intel_engine_cs *engine);
226static int intel_lr_context_pin(struct i915_gem_context *ctx,
231 struct intel_engine_cs *engine); 227 struct intel_engine_cs *engine);
232 228
233/** 229/**
234 * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists 230 * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
235 * @dev: DRM device. 231 * @dev_priv: i915 device private
236 * @enable_execlists: value of i915.enable_execlists module parameter. 232 * @enable_execlists: value of i915.enable_execlists module parameter.
237 * 233 *
238 * Only certain platforms support Execlists (the prerequisites being 234 * Only certain platforms support Execlists (the prerequisites being
@@ -240,23 +236,22 @@ static int intel_lr_context_pin(struct intel_context *ctx,
240 * 236 *
241 * Return: 1 if Execlists is supported and has to be enabled. 237 * Return: 1 if Execlists is supported and has to be enabled.
242 */ 238 */
243int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists) 239int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enable_execlists)
244{ 240{
245 WARN_ON(i915.enable_ppgtt == -1);
246
247 /* On platforms with execlist available, vGPU will only 241 /* On platforms with execlist available, vGPU will only
248 * support execlist mode, no ring buffer mode. 242 * support execlist mode, no ring buffer mode.
249 */ 243 */
250 if (HAS_LOGICAL_RING_CONTEXTS(dev) && intel_vgpu_active(dev)) 244 if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && intel_vgpu_active(dev_priv))
251 return 1; 245 return 1;
252 246
253 if (INTEL_INFO(dev)->gen >= 9) 247 if (INTEL_GEN(dev_priv) >= 9)
254 return 1; 248 return 1;
255 249
256 if (enable_execlists == 0) 250 if (enable_execlists == 0)
257 return 0; 251 return 0;
258 252
259 if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev) && 253 if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) &&
254 USES_PPGTT(dev_priv) &&
260 i915.use_mmio_flip >= 0) 255 i915.use_mmio_flip >= 0)
261 return 1; 256 return 1;
262 257
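
[The sanitised execlists decision above is a fixed ladder of platform checks followed by the module parameter. Restated as a pure function for clarity; argument names are descriptive stand-ins for the predicates tested above:]

static int sanitize_enable_execlists(int vgpu_active, int gen,
                                     int has_lr_contexts, int uses_ppgtt,
                                     int mmio_flip_ok, int param)
{
        if (has_lr_contexts && vgpu_active)
                return 1;       /* vGPU only supports execlist mode */
        if (gen >= 9)
                return 1;       /* mandatory from gen9 onwards */
        if (param == 0)
                return 0;       /* explicitly disabled by the user */
        if (has_lr_contexts && uses_ppgtt && mmio_flip_ok)
                return 1;
        return 0;
}
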
@@ -266,19 +261,17 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
266static void 261static void
267logical_ring_init_platform_invariants(struct intel_engine_cs *engine) 262logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
268{ 263{
269 struct drm_device *dev = engine->dev; 264 struct drm_i915_private *dev_priv = engine->i915;
270 265
271 if (IS_GEN8(dev) || IS_GEN9(dev)) 266 if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv))
272 engine->idle_lite_restore_wa = ~0; 267 engine->idle_lite_restore_wa = ~0;
273 268
274 engine->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || 269 engine->disable_lite_restore_wa = (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
275 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) && 270 IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) &&
276 (engine->id == VCS || engine->id == VCS2); 271 (engine->id == VCS || engine->id == VCS2);
277 272
278 engine->ctx_desc_template = GEN8_CTX_VALID; 273 engine->ctx_desc_template = GEN8_CTX_VALID;
279 engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) << 274 if (IS_GEN8(dev_priv))
280 GEN8_CTX_ADDRESSING_MODE_SHIFT;
281 if (IS_GEN8(dev))
282 engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT; 275 engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
283 engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE; 276 engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
284 277
@@ -297,7 +290,7 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
297 * descriptor for a pinned context 290 * descriptor for a pinned context
298 * 291 *
299 * @ctx: Context to work on 292 * @ctx: Context to work on
300 * @ring: Engine the descriptor will be used with 293 * @engine: Engine the descriptor will be used with
301 * 294 *
302 * The context descriptor encodes various attributes of a context, 295 * The context descriptor encodes various attributes of a context,
303 * including its GTT address and some flags. Because it's fairly 296 * including its GTT address and some flags. Because it's fairly
@@ -305,62 +298,42 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
305 * which remains valid until the context is unpinned. 298 * which remains valid until the context is unpinned.
306 * 299 *
307 * This is what a descriptor looks like, from LSB to MSB: 300 * This is what a descriptor looks like, from LSB to MSB:
308 * bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template) 301 * bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template)
309 * bits 12-31: LRCA, GTT address of (the HWSP of) this context 302 * bits 12-31: LRCA, GTT address of (the HWSP of) this context
310 * bits 32-51: ctx ID, a globally unique tag (the LRCA again!) 303 * bits 32-52: ctx ID, a globally unique tag
311 * bits 52-63: reserved, may encode the engine ID (for GuC) 304 * bits 53-54: mbz, reserved for use by hardware
305 * bits 55-63: group ID, currently unused and set to 0
312 */ 306 */
313static void 307static void
314intel_lr_context_descriptor_update(struct intel_context *ctx, 308intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
315 struct intel_engine_cs *engine) 309 struct intel_engine_cs *engine)
316{ 310{
317 uint64_t lrca, desc; 311 struct intel_context *ce = &ctx->engine[engine->id];
312 u64 desc;
318 313
319 lrca = ctx->engine[engine->id].lrc_vma->node.start + 314 BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH));
320 LRC_PPHWSP_PN * PAGE_SIZE;
321 315
322 desc = engine->ctx_desc_template; /* bits 0-11 */ 316 desc = ctx->desc_template; /* bits 3-4 */
323 desc |= lrca; /* bits 12-31 */ 317 desc |= engine->ctx_desc_template; /* bits 0-11 */
324 desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-51 */ 318 desc |= ce->lrc_vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
319 /* bits 12-31 */
320 desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */
325 321
326 ctx->engine[engine->id].lrc_desc = desc; 322 ce->lrc_desc = desc;
327} 323}
328 324
329uint64_t intel_lr_context_descriptor(struct intel_context *ctx, 325uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
330 struct intel_engine_cs *engine) 326 struct intel_engine_cs *engine)
331{ 327{
332 return ctx->engine[engine->id].lrc_desc; 328 return ctx->engine[engine->id].lrc_desc;
333} 329}
334 330
335/**
336 * intel_execlists_ctx_id() - get the Execlists Context ID
337 * @ctx: Context to get the ID for
338 * @ring: Engine to get the ID for
339 *
340 * Do not confuse with ctx->id! Unfortunately we have a name overload
 341 * here: the old context ID we pass to userspace as a handle so that
342 * they can refer to a context, and the new context ID we pass to the
343 * ELSP so that the GPU can inform us of the context status via
344 * interrupts.
345 *
346 * The context ID is a portion of the context descriptor, so we can
347 * just extract the required part from the cached descriptor.
348 *
349 * Return: 20-bits globally unique context ID.
350 */
351u32 intel_execlists_ctx_id(struct intel_context *ctx,
352 struct intel_engine_cs *engine)
353{
354 return intel_lr_context_descriptor(ctx, engine) >> GEN8_CTX_ID_SHIFT;
355}
356
357static void execlists_elsp_write(struct drm_i915_gem_request *rq0, 331static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
358 struct drm_i915_gem_request *rq1) 332 struct drm_i915_gem_request *rq1)
359{ 333{
360 334
361 struct intel_engine_cs *engine = rq0->engine; 335 struct intel_engine_cs *engine = rq0->engine;
362 struct drm_device *dev = engine->dev; 336 struct drm_i915_private *dev_priv = rq0->i915;
363 struct drm_i915_private *dev_priv = dev->dev_private;
364 uint64_t desc[2]; 337 uint64_t desc[2];
365 338
366 if (rq1) { 339 if (rq1) {
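
[The updated descriptor layout in the kernel-doc above packs three fields into one u64: flags in bits 0-11, the page-aligned LRCA in bits 12-31, and the per-context hw_id in bits 32-52. A minimal sketch of that packing, assuming a page-aligned LRCA and folding both template parts (ctx->desc_template and engine->ctx_desc_template) into one flags argument:]

#include <stdint.h>
#include <stdio.h>

#define GEN8_CTX_ID_SHIFT 32

static uint64_t lrc_descriptor(uint32_t template, uint64_t lrca,
                               uint32_t hw_id)
{
        /* Page alignment of lrca keeps bits 0-11 free for the flags,
         * so plain ORs assemble the descriptor without masking. */
        return (uint64_t)template | lrca |
               ((uint64_t)hw_id << GEN8_CTX_ID_SHIFT);
}

int main(void)
{
        uint64_t desc = lrc_descriptor(0x021, 0x12345000, 7);

        printf("desc = %#llx\n", (unsigned long long)desc);
        return 0;
}
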
@@ -431,6 +404,20 @@ static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
431 spin_unlock_irq(&dev_priv->uncore.lock); 404 spin_unlock_irq(&dev_priv->uncore.lock);
432} 405}
433 406
407static inline void execlists_context_status_change(
408 struct drm_i915_gem_request *rq,
409 unsigned long status)
410{
411 /*
412 * Only used when GVT-g is enabled now. When GVT-g is disabled,
413 * The compiler should eliminate this function as dead-code.
414 */
415 if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
416 return;
417
418 atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq);
419}
420
434static void execlists_context_unqueue(struct intel_engine_cs *engine) 421static void execlists_context_unqueue(struct intel_engine_cs *engine)
435{ 422{
436 struct drm_i915_gem_request *req0 = NULL, *req1 = NULL; 423 struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
@@ -442,7 +429,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine)
442 * If irqs are not active generate a warning as batches that finish 429 * If irqs are not active generate a warning as batches that finish
443 * without the irqs may get lost and a GPU Hang may occur. 430 * without the irqs may get lost and a GPU Hang may occur.
444 */ 431 */
445 WARN_ON(!intel_irqs_enabled(engine->dev->dev_private)); 432 WARN_ON(!intel_irqs_enabled(engine->i915));
446 433
447 /* Try to read in pairs */ 434 /* Try to read in pairs */
448 list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue, 435 list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
@@ -453,10 +440,24 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine)
453 /* Same ctx: ignore first request, as second request 440 /* Same ctx: ignore first request, as second request
454 * will update tail past first request's workload */ 441 * will update tail past first request's workload */
455 cursor->elsp_submitted = req0->elsp_submitted; 442 cursor->elsp_submitted = req0->elsp_submitted;
456 list_move_tail(&req0->execlist_link, 443 list_del(&req0->execlist_link);
457 &engine->execlist_retired_req_list); 444 i915_gem_request_unreference(req0);
458 req0 = cursor; 445 req0 = cursor;
459 } else { 446 } else {
447 if (IS_ENABLED(CONFIG_DRM_I915_GVT)) {
448 /*
 449 * req0's context (after merging) requires single
 450 * submission; stop picking
451 */
452 if (req0->ctx->execlists_force_single_submission)
453 break;
454 /*
 455 * req0's context doesn't require single submission,
 456 * but the next request's context does; stop picking
457 */
458 if (cursor->ctx->execlists_force_single_submission)
459 break;
460 }
460 req1 = cursor; 461 req1 = cursor;
461 WARN_ON(req1->elsp_submitted); 462 WARN_ON(req1->elsp_submitted);
462 break; 463 break;
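
[The single-submission rule added above changes how the unqueue loop picks requests for the two ELSP ports: same-context requests are merged, and pairing stops at any context that demands single submission. A reduced sketch of that pairing over a plain linked list; locking, refcounting and the elsp_submitted bookkeeping are deliberately omitted:]

#include <stdbool.h>
#include <stddef.h>

struct ctx { bool force_single_submission; };
struct req { struct ctx *ctx; struct req *next; };

static void pick_pair(struct req *queue, struct req **r0, struct req **r1)
{
        struct req *cur;

        *r0 = *r1 = NULL;
        for (cur = queue; cur; cur = cur->next) {
                if (!*r0) {
                        *r0 = cur;
                } else if (cur->ctx == (*r0)->ctx) {
                        *r0 = cur;      /* merge: later tail supersedes */
                } else {
                        if ((*r0)->ctx->force_single_submission ||
                            cur->ctx->force_single_submission)
                                break;  /* no second port for this pick */
                        *r1 = cur;
                        break;
                }
        }
}
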
@@ -466,6 +467,12 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine)
466 if (unlikely(!req0)) 467 if (unlikely(!req0))
467 return; 468 return;
468 469
470 execlists_context_status_change(req0, INTEL_CONTEXT_SCHEDULE_IN);
471
472 if (req1)
473 execlists_context_status_change(req1,
474 INTEL_CONTEXT_SCHEDULE_IN);
475
469 if (req0->elsp_submitted & engine->idle_lite_restore_wa) { 476 if (req0->elsp_submitted & engine->idle_lite_restore_wa) {
470 /* 477 /*
471 * WaIdleLiteRestore: make sure we never cause a lite restore 478 * WaIdleLiteRestore: make sure we never cause a lite restore
@@ -486,7 +493,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine)
486} 493}
487 494
488static unsigned int 495static unsigned int
489execlists_check_remove_request(struct intel_engine_cs *engine, u32 request_id) 496execlists_check_remove_request(struct intel_engine_cs *engine, u32 ctx_id)
490{ 497{
491 struct drm_i915_gem_request *head_req; 498 struct drm_i915_gem_request *head_req;
492 499
@@ -496,19 +503,18 @@ execlists_check_remove_request(struct intel_engine_cs *engine, u32 request_id)
496 struct drm_i915_gem_request, 503 struct drm_i915_gem_request,
497 execlist_link); 504 execlist_link);
498 505
499 if (!head_req) 506 if (WARN_ON(!head_req || (head_req->ctx_hw_id != ctx_id)))
500 return 0; 507 return 0;
501
502 if (unlikely(intel_execlists_ctx_id(head_req->ctx, engine) != request_id))
503 return 0;
504 508
505 WARN(head_req->elsp_submitted == 0, "Never submitted head request\n"); 509 WARN(head_req->elsp_submitted == 0, "Never submitted head request\n");
506 510
507 if (--head_req->elsp_submitted > 0) 511 if (--head_req->elsp_submitted > 0)
508 return 0; 512 return 0;
509 513
510 list_move_tail(&head_req->execlist_link, 514 execlists_context_status_change(head_req, INTEL_CONTEXT_SCHEDULE_OUT);
511 &engine->execlist_retired_req_list); 515
516 list_del(&head_req->execlist_link);
517 i915_gem_request_unreference(head_req);
512 518
513 return 1; 519 return 1;
514} 520}
@@ -517,7 +523,7 @@ static u32
517get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer, 523get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
518 u32 *context_id) 524 u32 *context_id)
519{ 525{
520 struct drm_i915_private *dev_priv = engine->dev->dev_private; 526 struct drm_i915_private *dev_priv = engine->i915;
521 u32 status; 527 u32 status;
522 528
523 read_pointer %= GEN8_CSB_ENTRIES; 529 read_pointer %= GEN8_CSB_ENTRIES;
@@ -535,7 +541,7 @@ get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
535 541
536/** 542/**
537 * intel_lrc_irq_handler() - handle Context Switch interrupts 543 * intel_lrc_irq_handler() - handle Context Switch interrupts
 538 * @engine: Engine Command Streamer to handle.                                        544 * @data: the engine, packed into the tasklet's unsigned long data
539 * 545 *
540 * Check the unread Context Status Buffers and manage the submission of new 546 * Check the unread Context Status Buffers and manage the submission of new
541 * contexts to the ELSP accordingly. 547 * contexts to the ELSP accordingly.
@@ -543,7 +549,7 @@ get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
543static void intel_lrc_irq_handler(unsigned long data) 549static void intel_lrc_irq_handler(unsigned long data)
544{ 550{
545 struct intel_engine_cs *engine = (struct intel_engine_cs *)data; 551 struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
546 struct drm_i915_private *dev_priv = engine->dev->dev_private; 552 struct drm_i915_private *dev_priv = engine->i915;
547 u32 status_pointer; 553 u32 status_pointer;
548 unsigned int read_pointer, write_pointer; 554 unsigned int read_pointer, write_pointer;
549 u32 csb[GEN8_CSB_ENTRIES][2]; 555 u32 csb[GEN8_CSB_ENTRIES][2];
@@ -612,11 +618,6 @@ static void execlists_context_queue(struct drm_i915_gem_request *request)
612 struct drm_i915_gem_request *cursor; 618 struct drm_i915_gem_request *cursor;
613 int num_elements = 0; 619 int num_elements = 0;
614 620
615 if (request->ctx != request->i915->kernel_context)
616 intel_lr_context_pin(request->ctx, engine);
617
618 i915_gem_request_reference(request);
619
620 spin_lock_bh(&engine->execlist_lock); 621 spin_lock_bh(&engine->execlist_lock);
621 622
622 list_for_each_entry(cursor, &engine->execlist_queue, execlist_link) 623 list_for_each_entry(cursor, &engine->execlist_queue, execlist_link)
@@ -633,12 +634,14 @@ static void execlists_context_queue(struct drm_i915_gem_request *request)
633 if (request->ctx == tail_req->ctx) { 634 if (request->ctx == tail_req->ctx) {
634 WARN(tail_req->elsp_submitted != 0, 635 WARN(tail_req->elsp_submitted != 0,
635 "More than 2 already-submitted reqs queued\n"); 636 "More than 2 already-submitted reqs queued\n");
636 list_move_tail(&tail_req->execlist_link, 637 list_del(&tail_req->execlist_link);
637 &engine->execlist_retired_req_list); 638 i915_gem_request_unreference(tail_req);
638 } 639 }
639 } 640 }
640 641
642 i915_gem_request_reference(request);
641 list_add_tail(&request->execlist_link, &engine->execlist_queue); 643 list_add_tail(&request->execlist_link, &engine->execlist_queue);
644 request->ctx_hw_id = request->ctx->hw_id;
642 if (num_elements == 0) 645 if (num_elements == 0)
643 execlists_context_unqueue(engine); 646 execlists_context_unqueue(engine);
644 647
@@ -698,9 +701,23 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
698 701
699int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request) 702int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
700{ 703{
701 int ret = 0; 704 struct intel_engine_cs *engine = request->engine;
705 struct intel_context *ce = &request->ctx->engine[engine->id];
706 int ret;
707
708 /* Flush enough space to reduce the likelihood of waiting after
709 * we start building the request - in which case we will just
710 * have to repeat work.
711 */
712 request->reserved_space += EXECLISTS_REQUEST_SIZE;
713
714 if (!ce->state) {
715 ret = execlists_context_deferred_alloc(request->ctx, engine);
716 if (ret)
717 return ret;
718 }
702 719
703 request->ringbuf = request->ctx->engine[request->engine->id].ringbuf; 720 request->ringbuf = ce->ringbuf;
704 721
705 if (i915.enable_guc_submission) { 722 if (i915.enable_guc_submission) {
706 /* 723 /*
@@ -708,16 +725,39 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
708 * going any further, as the i915_add_request() call 725 * going any further, as the i915_add_request() call
709 * later on mustn't fail ... 726 * later on mustn't fail ...
710 */ 727 */
711 struct intel_guc *guc = &request->i915->guc; 728 ret = i915_guc_wq_check_space(request);
712
713 ret = i915_guc_wq_check_space(guc->execbuf_client);
714 if (ret) 729 if (ret)
715 return ret; 730 return ret;
716 } 731 }
717 732
718 if (request->ctx != request->i915->kernel_context) 733 ret = intel_lr_context_pin(request->ctx, engine);
719 ret = intel_lr_context_pin(request->ctx, request->engine); 734 if (ret)
735 return ret;
736
737 ret = intel_ring_begin(request, 0);
738 if (ret)
739 goto err_unpin;
740
741 if (!ce->initialised) {
742 ret = engine->init_context(request);
743 if (ret)
744 goto err_unpin;
745
746 ce->initialised = true;
747 }
748
749 /* Note that after this point, we have committed to using
750 * this request as it is being used to both track the
751 * state of engine initialisation and liveness of the
752 * golden renderstate above. Think twice before you try
753 * to cancel/unwind this request now.
754 */
755
756 request->reserved_space -= EXECLISTS_REQUEST_SIZE;
757 return 0;
720 758
759err_unpin:
760 intel_lr_context_unpin(request->ctx, engine);
721 return ret; 761 return ret;
722} 762}
723 763
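
[The EXECLISTS_REQUEST_SIZE dance above reserves ring space up front so that emitting the request tail cannot fail late, then releases the reservation once setup can no longer fail. A schematic sketch of the reserve-then-release pattern, with the unwind path simplified relative to the err_unpin handling above:]

#define EXECLISTS_REQUEST_SIZE 64 /* bytes */

struct request { unsigned int reserved_space; };

static int build_request(struct request *rq,
                         int (*emit_setup)(struct request *rq))
{
        int ret;

        /* Reserve before any emission so ring_begin() keeps room
         * for the request tail. */
        rq->reserved_space += EXECLISTS_REQUEST_SIZE;

        ret = emit_setup(rq);   /* may still fail and unwind safely */
        if (ret) {
                rq->reserved_space -= EXECLISTS_REQUEST_SIZE;
                return ret;
        }

        /* Committed: the tail emission may now rely on the reserve. */
        rq->reserved_space -= EXECLISTS_REQUEST_SIZE;
        return 0;
}
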
@@ -734,7 +774,6 @@ static int
734intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request) 774intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
735{ 775{
736 struct intel_ringbuffer *ringbuf = request->ringbuf; 776 struct intel_ringbuffer *ringbuf = request->ringbuf;
737 struct drm_i915_private *dev_priv = request->i915;
738 struct intel_engine_cs *engine = request->engine; 777 struct intel_engine_cs *engine = request->engine;
739 778
740 intel_logical_ring_advance(ringbuf); 779 intel_logical_ring_advance(ringbuf);
@@ -753,51 +792,28 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
753 if (intel_engine_stopped(engine)) 792 if (intel_engine_stopped(engine))
754 return 0; 793 return 0;
755 794
756 if (engine->last_context != request->ctx) { 795 /* We keep the previous context alive until we retire the following
 757 if (engine->last_context)                                                            796 * request. This ensures that the context object is still pinned
758 intel_lr_context_unpin(engine->last_context, engine); 797 * for any residual writes the HW makes into it on the context switch
759 if (request->ctx != request->i915->kernel_context) { 798 * into the next object following the breadcrumb. Otherwise, we may
760 intel_lr_context_pin(request->ctx, engine); 799 * retire the context too early.
761 engine->last_context = request->ctx; 800 */
762 } else { 801 request->previous_context = engine->last_context;
763 engine->last_context = NULL; 802 engine->last_context = request->ctx;
764 }
765 }
766 803
767 if (dev_priv->guc.execbuf_client) 804 if (i915.enable_guc_submission)
768 i915_guc_submit(dev_priv->guc.execbuf_client, request); 805 i915_guc_submit(request);
769 else 806 else
770 execlists_context_queue(request); 807 execlists_context_queue(request);
771 808
772 return 0; 809 return 0;
773} 810}
774 811
775int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
776{
777 /*
778 * The first call merely notes the reserve request and is common for
779 * all back ends. The subsequent localised _begin() call actually
780 * ensures that the reservation is available. Without the begin, if
781 * the request creator immediately submitted the request without
782 * adding any commands to it then there might not actually be
783 * sufficient room for the submission commands.
784 */
785 intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
786
787 return intel_ring_begin(request, 0);
788}
789
790/** 812/**
791 * execlists_submission() - submit a batchbuffer for execution, Execlists style 813 * execlists_submission() - submit a batchbuffer for execution, Execlists style
792 * @dev: DRM device. 814 * @params: execbuffer call parameters.
793 * @file: DRM file.
794 * @ring: Engine Command Streamer to submit to.
795 * @ctx: Context to employ for this submission.
796 * @args: execbuffer call arguments. 815 * @args: execbuffer call arguments.
797 * @vmas: list of vmas. 816 * @vmas: list of vmas.
798 * @batch_obj: the batchbuffer to submit.
799 * @exec_start: batchbuffer start virtual address pointer.
800 * @dispatch_flags: translated execbuffer call flags.
801 * 817 *
802 * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts 818 * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts
803 * away the submission details of the execbuffer ioctl call. 819 * away the submission details of the execbuffer ioctl call.
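
[The previous_context field introduced in the hunk above replaces eager unpinning of the outgoing context with a keep-alive: each request records the context it is switching away from, and that reference is only dropped when the request retires, i.e. once the hardware has fully switched. A reduced sketch of the lifetime rule:]

struct context;
struct request {
        struct context *ctx;
        struct context *previous_context;
};

struct engine { struct context *last_context; };

static void advance_and_submit(struct engine *e, struct request *rq)
{
        /* Kept alive until this request retires, covering any
         * residual writes the hardware makes during the switch. */
        rq->previous_context = e->last_context;
        e->last_context = rq->ctx;
}
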
@@ -881,28 +897,18 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
881 return 0; 897 return 0;
882} 898}
883 899
884void intel_execlists_retire_requests(struct intel_engine_cs *engine) 900void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
885{ 901{
886 struct drm_i915_gem_request *req, *tmp; 902 struct drm_i915_gem_request *req, *tmp;
887 struct list_head retired_list; 903 LIST_HEAD(cancel_list);
888 904
889 WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex)); 905 WARN_ON(!mutex_is_locked(&engine->i915->dev->struct_mutex));
890 if (list_empty(&engine->execlist_retired_req_list))
891 return;
892 906
893 INIT_LIST_HEAD(&retired_list);
894 spin_lock_bh(&engine->execlist_lock); 907 spin_lock_bh(&engine->execlist_lock);
895 list_replace_init(&engine->execlist_retired_req_list, &retired_list); 908 list_replace_init(&engine->execlist_queue, &cancel_list);
896 spin_unlock_bh(&engine->execlist_lock); 909 spin_unlock_bh(&engine->execlist_lock);
897 910
898 list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) { 911 list_for_each_entry_safe(req, tmp, &cancel_list, execlist_link) {
899 struct intel_context *ctx = req->ctx;
900 struct drm_i915_gem_object *ctx_obj =
901 ctx->engine[engine->id].state;
902
903 if (ctx_obj && (ctx != req->i915->kernel_context))
904 intel_lr_context_unpin(ctx, engine);
905
906 list_del(&req->execlist_link); 912 list_del(&req->execlist_link);
907 i915_gem_request_unreference(req); 913 i915_gem_request_unreference(req);
908 } 914 }
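
[intel_execlists_cancel_requests() above detaches the whole queue in O(1) under execlist_lock and drops references outside it. The same shape on a toy singly-linked list; the lock itself is elided in this sketch:]

#include <stddef.h>

struct node { struct node *next; };

struct queue {
        struct node *head;
        /* lock elided; the driver holds execlist_lock here */
};

static struct node *steal_all(struct queue *q)
{
        struct node *all = q->head;

        q->head = NULL;         /* queue is empty, lock can drop now */
        return all;             /* caller walks and releases unlocked */
}
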
@@ -910,7 +916,7 @@ void intel_execlists_retire_requests(struct intel_engine_cs *engine)
910 916
911void intel_logical_ring_stop(struct intel_engine_cs *engine) 917void intel_logical_ring_stop(struct intel_engine_cs *engine)
912{ 918{
913 struct drm_i915_private *dev_priv = engine->dev->dev_private; 919 struct drm_i915_private *dev_priv = engine->i915;
914 int ret; 920 int ret;
915 921
916 if (!intel_engine_initialized(engine)) 922 if (!intel_engine_initialized(engine))
@@ -946,25 +952,26 @@ int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
946 return 0; 952 return 0;
947} 953}
948 954
949static int intel_lr_context_do_pin(struct intel_context *ctx, 955static int intel_lr_context_pin(struct i915_gem_context *ctx,
950 struct intel_engine_cs *engine) 956 struct intel_engine_cs *engine)
951{ 957{
952 struct drm_device *dev = engine->dev; 958 struct drm_i915_private *dev_priv = ctx->i915;
953 struct drm_i915_private *dev_priv = dev->dev_private; 959 struct intel_context *ce = &ctx->engine[engine->id];
954 struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
955 struct intel_ringbuffer *ringbuf = ctx->engine[engine->id].ringbuf;
956 void *vaddr; 960 void *vaddr;
957 u32 *lrc_reg_state; 961 u32 *lrc_reg_state;
958 int ret; 962 int ret;
959 963
960 WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex)); 964 lockdep_assert_held(&ctx->i915->dev->struct_mutex);
965
966 if (ce->pin_count++)
967 return 0;
961 968
962 ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 969 ret = i915_gem_obj_ggtt_pin(ce->state, GEN8_LR_CONTEXT_ALIGN,
963 PIN_OFFSET_BIAS | GUC_WOPCM_TOP); 970 PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
964 if (ret) 971 if (ret)
965 return ret; 972 goto err;
966 973
967 vaddr = i915_gem_object_pin_map(ctx_obj); 974 vaddr = i915_gem_object_pin_map(ce->state);
968 if (IS_ERR(vaddr)) { 975 if (IS_ERR(vaddr)) {
969 ret = PTR_ERR(vaddr); 976 ret = PTR_ERR(vaddr);
970 goto unpin_ctx_obj; 977 goto unpin_ctx_obj;
@@ -972,65 +979,54 @@ static int intel_lr_context_do_pin(struct intel_context *ctx,
972 979
973 lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; 980 lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
974 981
975 ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ringbuf); 982 ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ce->ringbuf);
976 if (ret) 983 if (ret)
977 goto unpin_map; 984 goto unpin_map;
978 985
979 ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj); 986 i915_gem_context_reference(ctx);
987 ce->lrc_vma = i915_gem_obj_to_ggtt(ce->state);
980 intel_lr_context_descriptor_update(ctx, engine); 988 intel_lr_context_descriptor_update(ctx, engine);
981 lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start; 989
982 ctx->engine[engine->id].lrc_reg_state = lrc_reg_state; 990 lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->ringbuf->vma->node.start;
983 ctx_obj->dirty = true; 991 ce->lrc_reg_state = lrc_reg_state;
992 ce->state->dirty = true;
984 993
985 /* Invalidate GuC TLB. */ 994 /* Invalidate GuC TLB. */
986 if (i915.enable_guc_submission) 995 if (i915.enable_guc_submission)
987 I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE); 996 I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
988 997
989 return ret; 998 return 0;
990 999
991unpin_map: 1000unpin_map:
992 i915_gem_object_unpin_map(ctx_obj); 1001 i915_gem_object_unpin_map(ce->state);
993unpin_ctx_obj: 1002unpin_ctx_obj:
994 i915_gem_object_ggtt_unpin(ctx_obj); 1003 i915_gem_object_ggtt_unpin(ce->state);
995 1004err:
1005 ce->pin_count = 0;
996 return ret; 1006 return ret;
997} 1007}
998 1008
999static int intel_lr_context_pin(struct intel_context *ctx, 1009void intel_lr_context_unpin(struct i915_gem_context *ctx,
1000 struct intel_engine_cs *engine) 1010 struct intel_engine_cs *engine)
1001{ 1011{
1002 int ret = 0; 1012 struct intel_context *ce = &ctx->engine[engine->id];
1003 1013
1004 if (ctx->engine[engine->id].pin_count++ == 0) { 1014 lockdep_assert_held(&ctx->i915->dev->struct_mutex);
1005 ret = intel_lr_context_do_pin(ctx, engine); 1015 GEM_BUG_ON(ce->pin_count == 0);
1006 if (ret)
1007 goto reset_pin_count;
1008 1016
1009 i915_gem_context_reference(ctx); 1017 if (--ce->pin_count)
1010 } 1018 return;
1011 return ret;
1012 1019
1013reset_pin_count: 1020 intel_unpin_ringbuffer_obj(ce->ringbuf);
1014 ctx->engine[engine->id].pin_count = 0;
1015 return ret;
1016}
1017 1021
1018void intel_lr_context_unpin(struct intel_context *ctx, 1022 i915_gem_object_unpin_map(ce->state);
1019 struct intel_engine_cs *engine) 1023 i915_gem_object_ggtt_unpin(ce->state);
1020{
1021 struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
1022 1024
1023 WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex)); 1025 ce->lrc_vma = NULL;
1024 if (--ctx->engine[engine->id].pin_count == 0) { 1026 ce->lrc_desc = 0;
1025 i915_gem_object_unpin_map(ctx_obj); 1027 ce->lrc_reg_state = NULL;
1026 intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
1027 i915_gem_object_ggtt_unpin(ctx_obj);
1028 ctx->engine[engine->id].lrc_vma = NULL;
1029 ctx->engine[engine->id].lrc_desc = 0;
1030 ctx->engine[engine->id].lrc_reg_state = NULL;
1031 1028
1032 i915_gem_context_unreference(ctx); 1029 i915_gem_context_unreference(ctx);
1033 }
1034} 1030}
1035 1031
1036static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req) 1032static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
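
[The pin path above is now a classic refcount gate: the first pin does the real work, later pins only bump the count, the error path resets the optimistic increment, and the last unpin tears down. A compact sketch of just that gate, with the real pin/unpin work abstracted behind callbacks:]

struct ce { unsigned int pin_count; };

static int pin(struct ce *ce, int (*do_pin)(struct ce *ce))
{
        int ret;

        if (ce->pin_count++)
                return 0;               /* already resident */

        ret = do_pin(ce);
        if (ret)
                ce->pin_count = 0;      /* undo the optimistic bump */
        return ret;
}

static void unpin(struct ce *ce, void (*do_unpin)(struct ce *ce))
{
        if (--ce->pin_count)
                return;                 /* still in use elsewhere */
        do_unpin(ce);
}
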
@@ -1038,9 +1034,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
1038 int ret, i; 1034 int ret, i;
1039 struct intel_engine_cs *engine = req->engine; 1035 struct intel_engine_cs *engine = req->engine;
1040 struct intel_ringbuffer *ringbuf = req->ringbuf; 1036 struct intel_ringbuffer *ringbuf = req->ringbuf;
1041 struct drm_device *dev = engine->dev; 1037 struct i915_workarounds *w = &req->i915->workarounds;
1042 struct drm_i915_private *dev_priv = dev->dev_private;
1043 struct i915_workarounds *w = &dev_priv->workarounds;
1044 1038
1045 if (w->count == 0) 1039 if (w->count == 0)
1046 return 0; 1040 return 0;
@@ -1106,12 +1100,13 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
1106 uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES); 1100 uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
1107 1101
1108 /* 1102 /*
1109 * WaDisableLSQCROPERFforOCL:skl 1103 * WaDisableLSQCROPERFforOCL:skl,kbl
1110 * This WA is implemented in skl_init_clock_gating() but since 1104 * This WA is implemented in skl_init_clock_gating() but since
1111 * this batch updates GEN8_L3SQCREG4 with default value we need to 1105 * this batch updates GEN8_L3SQCREG4 with default value we need to
1112 * set this bit here to retain the WA during flush. 1106 * set this bit here to retain the WA during flush.
1113 */ 1107 */
1114 if (IS_SKL_REVID(engine->dev, 0, SKL_REVID_E0)) 1108 if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_E0) ||
1109 IS_KBL_REVID(engine->i915, 0, KBL_REVID_E0))
1115 l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS; 1110 l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
1116 1111
1117 wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 | 1112 wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
@@ -1163,7 +1158,7 @@ static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
1163/** 1158/**
1164 * gen8_init_indirectctx_bb() - initialize indirect ctx batch with WA 1159 * gen8_init_indirectctx_bb() - initialize indirect ctx batch with WA
1165 * 1160 *
1166 * @ring: only applicable for RCS 1161 * @engine: only applicable for RCS
1167 * @wa_ctx: structure representing wa_ctx 1162 * @wa_ctx: structure representing wa_ctx
1168 * offset: specifies start of the batch, should be cache-aligned. This is updated 1163 * offset: specifies start of the batch, should be cache-aligned. This is updated
1169 * with the offset value received as input. 1164 * with the offset value received as input.
@@ -1200,7 +1195,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
1200 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE); 1195 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
1201 1196
1202 /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */ 1197 /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
1203 if (IS_BROADWELL(engine->dev)) { 1198 if (IS_BROADWELL(engine->i915)) {
1204 int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index); 1199 int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
1205 if (rc < 0) 1200 if (rc < 0)
1206 return rc; 1201 return rc;
@@ -1237,7 +1232,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
1237/** 1232/**
1238 * gen8_init_perctx_bb() - initialize per ctx batch with WA 1233 * gen8_init_perctx_bb() - initialize per ctx batch with WA
1239 * 1234 *
1240 * @ring: only applicable for RCS 1235 * @engine: only applicable for RCS
1241 * @wa_ctx: structure representing wa_ctx 1236 * @wa_ctx: structure representing wa_ctx
1242 * offset: specifies start of the batch, should be cache-aligned. 1237 * offset: specifies start of the batch, should be cache-aligned.
1243 * size: size of the batch in DWORDS but HW expects in terms of cachelines 1238 * size: size of the batch in DWORDS but HW expects in terms of cachelines
@@ -1272,12 +1267,11 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
1272 uint32_t *offset) 1267 uint32_t *offset)
1273{ 1268{
1274 int ret; 1269 int ret;
1275 struct drm_device *dev = engine->dev;
1276 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); 1270 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1277 1271
1278 /* WaDisableCtxRestoreArbitration:skl,bxt */ 1272 /* WaDisableCtxRestoreArbitration:skl,bxt */
1279 if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) || 1273 if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
1280 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) 1274 IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
1281 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE); 1275 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
1282 1276
1283 /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */ 1277 /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
@@ -1286,6 +1280,22 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
1286 return ret; 1280 return ret;
1287 index = ret; 1281 index = ret;
1288 1282
1283 /* WaClearSlmSpaceAtContextSwitch:kbl */
1284 /* Actual scratch location is at 128 bytes offset */
1285 if (IS_KBL_REVID(engine->i915, 0, KBL_REVID_A0)) {
1286 uint32_t scratch_addr
1287 = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
1288
1289 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
1290 wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
1291 PIPE_CONTROL_GLOBAL_GTT_IVB |
1292 PIPE_CONTROL_CS_STALL |
1293 PIPE_CONTROL_QW_WRITE));
1294 wa_ctx_emit(batch, index, scratch_addr);
1295 wa_ctx_emit(batch, index, 0);
1296 wa_ctx_emit(batch, index, 0);
1297 wa_ctx_emit(batch, index, 0);
1298 }
1289 /* Pad to end of cacheline */ 1299 /* Pad to end of cacheline */
1290 while (index % CACHELINE_DWORDS) 1300 while (index % CACHELINE_DWORDS)
1291 wa_ctx_emit(batch, index, MI_NOOP); 1301 wa_ctx_emit(batch, index, MI_NOOP);
@@ -1298,12 +1308,11 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
1298 uint32_t *const batch, 1308 uint32_t *const batch,
1299 uint32_t *offset) 1309 uint32_t *offset)
1300{ 1310{
1301 struct drm_device *dev = engine->dev;
1302 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); 1311 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1303 1312
1304 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */ 1313 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
1305 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || 1314 if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_B0) ||
1306 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { 1315 IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
1307 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1)); 1316 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
1308 wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0); 1317 wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
1309 wa_ctx_emit(batch, index, 1318 wa_ctx_emit(batch, index,
@@ -1312,7 +1321,7 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
1312 } 1321 }
1313 1322
1314 /* WaClearTdlStateAckDirtyBits:bxt */ 1323 /* WaClearTdlStateAckDirtyBits:bxt */
1315 if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) { 1324 if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_B0)) {
1316 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4)); 1325 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4));
1317 1326
1318 wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK); 1327 wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK);
@@ -1331,8 +1340,8 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
1331 } 1340 }
1332 1341
1333 /* WaDisableCtxRestoreArbitration:skl,bxt */ 1342 /* WaDisableCtxRestoreArbitration:skl,bxt */
1334 if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) || 1343 if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
1335 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) 1344 IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
1336 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE); 1345 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
1337 1346
1338 wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END); 1347 wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
@@ -1344,11 +1353,13 @@ static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
1344{ 1353{
1345 int ret; 1354 int ret;
1346 1355
1347 engine->wa_ctx.obj = i915_gem_alloc_object(engine->dev, 1356 engine->wa_ctx.obj = i915_gem_object_create(engine->i915->dev,
1348 PAGE_ALIGN(size)); 1357 PAGE_ALIGN(size));
1349 if (!engine->wa_ctx.obj) { 1358 if (IS_ERR(engine->wa_ctx.obj)) {
1350 DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n"); 1359 DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
1351 return -ENOMEM; 1360 ret = PTR_ERR(engine->wa_ctx.obj);
1361 engine->wa_ctx.obj = NULL;
1362 return ret;
1352 } 1363 }
1353 1364
1354 ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0); 1365 ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0);
@@ -1382,9 +1393,9 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
1382 WARN_ON(engine->id != RCS); 1393 WARN_ON(engine->id != RCS);
1383 1394
1384 /* update this when WA for higher Gen are added */ 1395 /* update this when WA for higher Gen are added */
1385 if (INTEL_INFO(engine->dev)->gen > 9) { 1396 if (INTEL_GEN(engine->i915) > 9) {
1386 DRM_ERROR("WA batch buffer is not initialized for Gen%d\n", 1397 DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
1387 INTEL_INFO(engine->dev)->gen); 1398 INTEL_GEN(engine->i915));
1388 return 0; 1399 return 0;
1389 } 1400 }
1390 1401
@@ -1404,7 +1415,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
1404 batch = kmap_atomic(page); 1415 batch = kmap_atomic(page);
1405 offset = 0; 1416 offset = 0;
1406 1417
1407 if (INTEL_INFO(engine->dev)->gen == 8) { 1418 if (IS_GEN8(engine->i915)) {
1408 ret = gen8_init_indirectctx_bb(engine, 1419 ret = gen8_init_indirectctx_bb(engine,
1409 &wa_ctx->indirect_ctx, 1420 &wa_ctx->indirect_ctx,
1410 batch, 1421 batch,
@@ -1418,7 +1429,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
1418 &offset); 1429 &offset);
1419 if (ret) 1430 if (ret)
1420 goto out; 1431 goto out;
1421 } else if (INTEL_INFO(engine->dev)->gen == 9) { 1432 } else if (IS_GEN9(engine->i915)) {
1422 ret = gen9_init_indirectctx_bb(engine, 1433 ret = gen9_init_indirectctx_bb(engine,
1423 &wa_ctx->indirect_ctx, 1434 &wa_ctx->indirect_ctx,
1424 batch, 1435 batch,
@@ -1444,7 +1455,7 @@ out:
1444 1455
1445static void lrc_init_hws(struct intel_engine_cs *engine) 1456static void lrc_init_hws(struct intel_engine_cs *engine)
1446{ 1457{
1447 struct drm_i915_private *dev_priv = engine->dev->dev_private; 1458 struct drm_i915_private *dev_priv = engine->i915;
1448 1459
1449 I915_WRITE(RING_HWS_PGA(engine->mmio_base), 1460 I915_WRITE(RING_HWS_PGA(engine->mmio_base),
1450 (u32)engine->status_page.gfx_addr); 1461 (u32)engine->status_page.gfx_addr);
@@ -1453,8 +1464,7 @@ static void lrc_init_hws(struct intel_engine_cs *engine)
1453 1464
1454static int gen8_init_common_ring(struct intel_engine_cs *engine) 1465static int gen8_init_common_ring(struct intel_engine_cs *engine)
1455{ 1466{
1456 struct drm_device *dev = engine->dev; 1467 struct drm_i915_private *dev_priv = engine->i915;
1457 struct drm_i915_private *dev_priv = dev->dev_private;
1458 unsigned int next_context_status_buffer_hw; 1468 unsigned int next_context_status_buffer_hw;
1459 1469
1460 lrc_init_hws(engine); 1470 lrc_init_hws(engine);
@@ -1501,8 +1511,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
1501 1511
1502static int gen8_init_render_ring(struct intel_engine_cs *engine) 1512static int gen8_init_render_ring(struct intel_engine_cs *engine)
1503{ 1513{
1504 struct drm_device *dev = engine->dev; 1514 struct drm_i915_private *dev_priv = engine->i915;
1505 struct drm_i915_private *dev_priv = dev->dev_private;
1506 int ret; 1515 int ret;
1507 1516
1508 ret = gen8_init_common_ring(engine); 1517 ret = gen8_init_common_ring(engine);
@@ -1579,7 +1588,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
1579 if (req->ctx->ppgtt && 1588 if (req->ctx->ppgtt &&
1580 (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) { 1589 (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
1581 if (!USES_FULL_48BIT_PPGTT(req->i915) && 1590 if (!USES_FULL_48BIT_PPGTT(req->i915) &&
1582 !intel_vgpu_active(req->i915->dev)) { 1591 !intel_vgpu_active(req->i915)) {
1583 ret = intel_logical_ring_emit_pdps(req); 1592 ret = intel_logical_ring_emit_pdps(req);
1584 if (ret) 1593 if (ret)
1585 return ret; 1594 return ret;
@@ -1607,8 +1616,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
1607 1616
1608static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine) 1617static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
1609{ 1618{
1610 struct drm_device *dev = engine->dev; 1619 struct drm_i915_private *dev_priv = engine->i915;
1611 struct drm_i915_private *dev_priv = dev->dev_private;
1612 unsigned long flags; 1620 unsigned long flags;
1613 1621
1614 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 1622 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1627,8 +1635,7 @@ static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
1627 1635
1628static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine) 1636static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine)
1629{ 1637{
1630 struct drm_device *dev = engine->dev; 1638 struct drm_i915_private *dev_priv = engine->i915;
1631 struct drm_i915_private *dev_priv = dev->dev_private;
1632 unsigned long flags; 1639 unsigned long flags;
1633 1640
1634 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1641 spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1645,8 +1652,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
1645{ 1652{
1646 struct intel_ringbuffer *ringbuf = request->ringbuf; 1653 struct intel_ringbuffer *ringbuf = request->ringbuf;
1647 struct intel_engine_cs *engine = ringbuf->engine; 1654 struct intel_engine_cs *engine = ringbuf->engine;
1648 struct drm_device *dev = engine->dev; 1655 struct drm_i915_private *dev_priv = request->i915;
1649 struct drm_i915_private *dev_priv = dev->dev_private;
1650 uint32_t cmd; 1656 uint32_t cmd;
1651 int ret; 1657 int ret;
1652 1658
@@ -1687,9 +1693,10 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
1687 struct intel_ringbuffer *ringbuf = request->ringbuf; 1693 struct intel_ringbuffer *ringbuf = request->ringbuf;
1688 struct intel_engine_cs *engine = ringbuf->engine; 1694 struct intel_engine_cs *engine = ringbuf->engine;
1689 u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; 1695 u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
1690 bool vf_flush_wa = false; 1696 bool vf_flush_wa = false, dc_flush_wa = false;
1691 u32 flags = 0; 1697 u32 flags = 0;
1692 int ret; 1698 int ret;
1699 int len;
1693 1700
1694 flags |= PIPE_CONTROL_CS_STALL; 1701 flags |= PIPE_CONTROL_CS_STALL;
1695 1702
@@ -1714,11 +1721,23 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
1714 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL 1721 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
1715 * pipe control. 1722 * pipe control.
1716 */ 1723 */
1717 if (IS_GEN9(engine->dev)) 1724 if (IS_GEN9(request->i915))
1718 vf_flush_wa = true; 1725 vf_flush_wa = true;
1726
1727 /* WaForGAMHang:kbl */
1728 if (IS_KBL_REVID(request->i915, 0, KBL_REVID_B0))
1729 dc_flush_wa = true;
1719 } 1730 }
1720 1731
1721 ret = intel_ring_begin(request, vf_flush_wa ? 12 : 6); 1732 len = 6;
1733
1734 if (vf_flush_wa)
1735 len += 6;
1736
1737 if (dc_flush_wa)
1738 len += 12;
1739
1740 ret = intel_ring_begin(request, len);
1722 if (ret) 1741 if (ret)
1723 return ret; 1742 return ret;
1724 1743
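
[The len arithmetic above sizes intel_ring_begin() for the base flush plus whichever workarounds fire: six dwords for the flush itself, six more for the VF-flush NULL pipe control, and twelve for the KBL DC flush that brackets the main flush with one pre- and one post-flush pipe control. The same computation as a pure function, with dword counts taken from the hunk:]

static int flush_len(int vf_flush_wa, int dc_flush_wa)
{
        int len = 6;            /* the main PIPE_CONTROL(6) */

        if (vf_flush_wa)
                len += 6;       /* NULL pipe control before it */
        if (dc_flush_wa)
                len += 12;      /* one pre- and one post-flush, 6 each */
        return len;
}
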
@@ -1731,12 +1750,31 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 		intel_logical_ring_emit(ringbuf, 0);
 	}
 
+	if (dc_flush_wa) {
+		intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
+		intel_logical_ring_emit(ringbuf, PIPE_CONTROL_DC_FLUSH_ENABLE);
+		intel_logical_ring_emit(ringbuf, 0);
+		intel_logical_ring_emit(ringbuf, 0);
+		intel_logical_ring_emit(ringbuf, 0);
+		intel_logical_ring_emit(ringbuf, 0);
+	}
+
 	intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
 	intel_logical_ring_emit(ringbuf, flags);
 	intel_logical_ring_emit(ringbuf, scratch_addr);
 	intel_logical_ring_emit(ringbuf, 0);
 	intel_logical_ring_emit(ringbuf, 0);
 	intel_logical_ring_emit(ringbuf, 0);
+
+	if (dc_flush_wa) {
+		intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
+		intel_logical_ring_emit(ringbuf, PIPE_CONTROL_CS_STALL);
+		intel_logical_ring_emit(ringbuf, 0);
+		intel_logical_ring_emit(ringbuf, 0);
+		intel_logical_ring_emit(ringbuf, 0);
+		intel_logical_ring_emit(ringbuf, 0);
+	}
+
 	intel_logical_ring_advance(ringbuf);
 
 	return 0;
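Note: ordering is the point of the WaForGAMHang sequence above — a DC-flush PIPE_CONTROL must land before the real flush and a CS-stall PIPE_CONTROL after it, bracketing the payload. A hedged user-space model of that emission order, writing 6-dword packets into a plain array (the opcode and flag values below are toy placeholders; the real ones are GFX_OP_PIPE_CONTROL() and PIPE_CONTROL_* from i915_reg.h):

#include <stdbool.h>
#include <stdio.h>

#define OP_PIPE_CONTROL   0x7a000004u /* placeholder encoding */
#define F_DC_FLUSH_ENABLE (1u << 5)
#define F_CS_STALL        (1u << 20)

static unsigned int *emit_pipe_control(unsigned int *cs, unsigned int flags)
{
	*cs++ = OP_PIPE_CONTROL;
	*cs++ = flags;
	*cs++ = 0; *cs++ = 0; *cs++ = 0; *cs++ = 0; /* addr + payload unused here */
	return cs;
}

/* WaForGAMHang-style bracketing: DC flush first, real flush, then CS stall. */
static unsigned int *emit_flush(unsigned int *cs, unsigned int flags, bool dc_wa)
{
	if (dc_wa)
		cs = emit_pipe_control(cs, F_DC_FLUSH_ENABLE);
	cs = emit_pipe_control(cs, flags);
	if (dc_wa)
		cs = emit_pipe_control(cs, F_CS_STALL);
	return cs;
}

int main(void)
{
	unsigned int ring[24], *end = emit_flush(ring, F_CS_STALL, true);
	printf("emitted %td dwords\n", end - ring); /* 18 with the workaround */
	return 0;
}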
@@ -1782,11 +1820,6 @@ static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno)
  */
 #define WA_TAIL_DWORDS 2
 
-static inline u32 hws_seqno_address(struct intel_engine_cs *engine)
-{
-	return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
-}
-
 static int gen8_emit_request(struct drm_i915_gem_request *request)
 {
 	struct intel_ringbuffer *ringbuf = request->ringbuf;
@@ -1802,7 +1835,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
 	intel_logical_ring_emit(ringbuf,
 				(MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
 	intel_logical_ring_emit(ringbuf,
-				hws_seqno_address(request->engine) |
+				intel_hws_seqno_address(request->engine) |
 				MI_FLUSH_DW_USE_GTT);
 	intel_logical_ring_emit(ringbuf, 0);
 	intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
@@ -1832,7 +1865,8 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
 				(PIPE_CONTROL_GLOBAL_GTT_IVB |
 				 PIPE_CONTROL_CS_STALL |
 				 PIPE_CONTROL_QW_WRITE));
-	intel_logical_ring_emit(ringbuf, hws_seqno_address(request->engine));
+	intel_logical_ring_emit(ringbuf,
+				intel_hws_seqno_address(request->engine));
 	intel_logical_ring_emit(ringbuf, 0);
 	intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
 	/* We're thrashing one dword of HWS. */
@@ -1894,7 +1928,7 @@ static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
 /**
  * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
  *
- * @ring: Engine Command Streamer.
+ * @engine: Engine Command Streamer.
  *
  */
 void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
@@ -1911,7 +1945,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
 	if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
 		tasklet_kill(&engine->irq_tasklet);
 
-	dev_priv = engine->dev->dev_private;
+	dev_priv = engine->i915;
 
 	if (engine->buffer) {
 		intel_logical_ring_stop(engine);
@@ -1928,18 +1962,18 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
 		i915_gem_object_unpin_map(engine->status_page.obj);
 		engine->status_page.obj = NULL;
 	}
+	intel_lr_context_unpin(dev_priv->kernel_context, engine);
 
 	engine->idle_lite_restore_wa = 0;
 	engine->disable_lite_restore_wa = false;
 	engine->ctx_desc_template = 0;
 
 	lrc_destroy_wa_ctx_obj(engine);
-	engine->dev = NULL;
+	engine->i915 = NULL;
 }
 
 static void
-logical_ring_default_vfuncs(struct drm_device *dev,
-			    struct intel_engine_cs *engine)
+logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 {
 	/* Default vfuncs which can be overriden by each engine. */
 	engine->init_hw = gen8_init_common_ring;
@@ -1950,7 +1984,7 @@ logical_ring_default_vfuncs(struct drm_device *dev,
 	engine->emit_bb_start = gen8_emit_bb_start;
 	engine->get_seqno = gen8_get_seqno;
 	engine->set_seqno = gen8_set_seqno;
-	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+	if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
 		engine->irq_seqno_barrier = bxt_a_seqno_barrier;
 		engine->set_seqno = bxt_a_set_seqno;
 	}
@@ -1961,6 +1995,7 @@ logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift)
 {
 	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
 	engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
+	init_waitqueue_head(&engine->irq_queue);
 }
 
 static int
@@ -1981,32 +2016,68 @@ lrc_setup_hws(struct intel_engine_cs *engine,
 	return 0;
 }
 
-static int
-logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine)
+static const struct logical_ring_info {
+	const char *name;
+	unsigned exec_id;
+	unsigned guc_id;
+	u32 mmio_base;
+	unsigned irq_shift;
+} logical_rings[] = {
+	[RCS] = {
+		.name = "render ring",
+		.exec_id = I915_EXEC_RENDER,
+		.guc_id = GUC_RENDER_ENGINE,
+		.mmio_base = RENDER_RING_BASE,
+		.irq_shift = GEN8_RCS_IRQ_SHIFT,
+	},
+	[BCS] = {
+		.name = "blitter ring",
+		.exec_id = I915_EXEC_BLT,
+		.guc_id = GUC_BLITTER_ENGINE,
+		.mmio_base = BLT_RING_BASE,
+		.irq_shift = GEN8_BCS_IRQ_SHIFT,
+	},
+	[VCS] = {
+		.name = "bsd ring",
+		.exec_id = I915_EXEC_BSD,
+		.guc_id = GUC_VIDEO_ENGINE,
+		.mmio_base = GEN6_BSD_RING_BASE,
+		.irq_shift = GEN8_VCS1_IRQ_SHIFT,
+	},
+	[VCS2] = {
+		.name = "bsd2 ring",
+		.exec_id = I915_EXEC_BSD,
+		.guc_id = GUC_VIDEO_ENGINE2,
+		.mmio_base = GEN8_BSD2_RING_BASE,
+		.irq_shift = GEN8_VCS2_IRQ_SHIFT,
+	},
+	[VECS] = {
+		.name = "video enhancement ring",
+		.exec_id = I915_EXEC_VEBOX,
+		.guc_id = GUC_VIDEOENHANCE_ENGINE,
+		.mmio_base = VEBOX_RING_BASE,
+		.irq_shift = GEN8_VECS_IRQ_SHIFT,
+	},
+};
+
+static struct intel_engine_cs *
+logical_ring_setup(struct drm_device *dev, enum intel_engine_id id)
 {
+	const struct logical_ring_info *info = &logical_rings[id];
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_context *dctx = dev_priv->kernel_context;
+	struct intel_engine_cs *engine = &dev_priv->engine[id];
 	enum forcewake_domains fw_domains;
-	int ret;
-
-	/* Intentionally left blank. */
-	engine->buffer = NULL;
-
-	engine->dev = dev;
-	INIT_LIST_HEAD(&engine->active_list);
-	INIT_LIST_HEAD(&engine->request_list);
-	i915_gem_batch_pool_init(dev, &engine->batch_pool);
-	init_waitqueue_head(&engine->irq_queue);
 
-	INIT_LIST_HEAD(&engine->buffers);
-	INIT_LIST_HEAD(&engine->execlist_queue);
-	INIT_LIST_HEAD(&engine->execlist_retired_req_list);
-	spin_lock_init(&engine->execlist_lock);
+	engine->id = id;
+	engine->name = info->name;
+	engine->exec_id = info->exec_id;
+	engine->guc_id = info->guc_id;
+	engine->mmio_base = info->mmio_base;
 
-	tasklet_init(&engine->irq_tasklet,
-		     intel_lrc_irq_handler, (unsigned long)engine);
+	engine->i915 = dev_priv;
 
-	logical_ring_init_platform_invariants(engine);
+	/* Intentionally left blank. */
+	engine->buffer = NULL;
 
 	fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
 						    RING_ELSP(engine),
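Note: the core of this refactor is replacing per-engine open-coded setup with one static table indexed by engine id, as logical_rings[] does above. A self-contained C illustration of the pattern (the mmio bases and irq shifts below are placeholder values, not the driver's):

#include <stdio.h>

enum engine_id { RCS, BCS, VCS, VCS2, VECS, NUM_ENGINES };

struct engine_info {
	const char *name;
	unsigned int mmio_base; /* placeholder addresses */
	unsigned int irq_shift;
};

static const struct engine_info engine_table[NUM_ENGINES] = {
	[RCS]  = { "render ring",            0x02000, 0 },
	[BCS]  = { "blitter ring",           0x22000, 16 },
	[VCS]  = { "bsd ring",               0x12000, 32 },
	[VCS2] = { "bsd2 ring",              0x1c000, 48 },
	[VECS] = { "video enhancement ring", 0x1a000, 8 },
};

int main(void)
{
	/* One loop replaces five nearly identical init functions. */
	for (enum engine_id id = RCS; id < NUM_ENGINES; id++)
		printf("%-24s mmio=0x%05x irq_shift=%u\n",
		       engine_table[id].name,
		       engine_table[id].mmio_base,
		       engine_table[id].irq_shift);
	return 0;
}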
@@ -2022,20 +2093,44 @@ logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine)
 
 	engine->fw_domains = fw_domains;
 
+	INIT_LIST_HEAD(&engine->active_list);
+	INIT_LIST_HEAD(&engine->request_list);
+	INIT_LIST_HEAD(&engine->buffers);
+	INIT_LIST_HEAD(&engine->execlist_queue);
+	spin_lock_init(&engine->execlist_lock);
+
+	tasklet_init(&engine->irq_tasklet,
+		     intel_lrc_irq_handler, (unsigned long)engine);
+
+	logical_ring_init_platform_invariants(engine);
+	logical_ring_default_vfuncs(engine);
+	logical_ring_default_irqs(engine, info->irq_shift);
+
+	intel_engine_init_hangcheck(engine);
+	i915_gem_batch_pool_init(dev, &engine->batch_pool);
+
+	return engine;
+}
+
+static int
+logical_ring_init(struct intel_engine_cs *engine)
+{
+	struct i915_gem_context *dctx = engine->i915->kernel_context;
+	int ret;
+
 	ret = i915_cmd_parser_init_ring(engine);
 	if (ret)
 		goto error;
 
-	ret = intel_lr_context_deferred_alloc(dctx, engine);
+	ret = execlists_context_deferred_alloc(dctx, engine);
 	if (ret)
 		goto error;
 
 	/* As this is the default context, always pin it */
-	ret = intel_lr_context_do_pin(dctx, engine);
+	ret = intel_lr_context_pin(dctx, engine);
 	if (ret) {
-		DRM_ERROR(
-			"Failed to pin and map ringbuffer %s: %d\n",
-			engine->name, ret);
+		DRM_ERROR("Failed to pin context for %s: %d\n",
+			  engine->name, ret);
 		goto error;
 	}
 
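Note: construction is now split into logical_ring_setup(), which only fills in static per-engine state and cannot fail, and logical_ring_init(), which performs the fallible allocations and the default-context pin. A minimal sketch of that two-phase shape (names and the malloc stand-in are illustrative, assuming only phase two can fail):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct engine { const char *name; void *ctx; };

/* Phase 1: cannot fail, only wires up static state. */
static struct engine *engine_setup(struct engine *e, const char *name)
{
	e->name = name;
	e->ctx = NULL;
	return e;
}

/* Phase 2: everything that can fail (allocations, default-context pin). */
static int engine_init(struct engine *e)
{
	e->ctx = malloc(64);
	return e->ctx ? 0 : -ENOMEM;
}

int main(void)
{
	struct engine rcs;

	if (engine_init(engine_setup(&rcs, "render ring")))
		return 1;
	printf("%s ready\n", rcs.name);
	free(rcs.ctx);
	return 0;
}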
@@ -2055,22 +2150,12 @@ error:
 
 static int logical_render_ring_init(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
+	struct intel_engine_cs *engine = logical_ring_setup(dev, RCS);
 	int ret;
 
-	engine->name = "render ring";
-	engine->id = RCS;
-	engine->exec_id = I915_EXEC_RENDER;
-	engine->guc_id = GUC_RENDER_ENGINE;
-	engine->mmio_base = RENDER_RING_BASE;
-
-	logical_ring_default_irqs(engine, GEN8_RCS_IRQ_SHIFT);
 	if (HAS_L3_DPF(dev))
 		engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
 
-	logical_ring_default_vfuncs(dev, engine);
-
 	/* Override some for render ring. */
 	if (INTEL_INFO(dev)->gen >= 9)
 		engine->init_hw = gen9_init_render_ring;
@@ -2081,8 +2166,6 @@ static int logical_render_ring_init(struct drm_device *dev)
 	engine->emit_flush = gen8_emit_flush_render;
 	engine->emit_request = gen8_emit_request_render;
 
-	engine->dev = dev;
-
 	ret = intel_init_pipe_control(engine);
 	if (ret)
 		return ret;
@@ -2098,7 +2181,7 @@ static int logical_render_ring_init(struct drm_device *dev)
 			  ret);
 	}
 
-	ret = logical_ring_init(dev, engine);
+	ret = logical_ring_init(engine);
 	if (ret) {
 		lrc_destroy_wa_ctx_obj(engine);
 	}
@@ -2108,70 +2191,30 @@ static int logical_render_ring_init(struct drm_device *dev)
 
 static int logical_bsd_ring_init(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *engine = &dev_priv->engine[VCS];
+	struct intel_engine_cs *engine = logical_ring_setup(dev, VCS);
 
-	engine->name = "bsd ring";
-	engine->id = VCS;
-	engine->exec_id = I915_EXEC_BSD;
-	engine->guc_id = GUC_VIDEO_ENGINE;
-	engine->mmio_base = GEN6_BSD_RING_BASE;
-
-	logical_ring_default_irqs(engine, GEN8_VCS1_IRQ_SHIFT);
-	logical_ring_default_vfuncs(dev, engine);
-
-	return logical_ring_init(dev, engine);
+	return logical_ring_init(engine);
 }
 
 static int logical_bsd2_ring_init(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *engine = &dev_priv->engine[VCS2];
+	struct intel_engine_cs *engine = logical_ring_setup(dev, VCS2);
 
-	engine->name = "bsd2 ring";
-	engine->id = VCS2;
-	engine->exec_id = I915_EXEC_BSD;
-	engine->guc_id = GUC_VIDEO_ENGINE2;
-	engine->mmio_base = GEN8_BSD2_RING_BASE;
-
-	logical_ring_default_irqs(engine, GEN8_VCS2_IRQ_SHIFT);
-	logical_ring_default_vfuncs(dev, engine);
-
-	return logical_ring_init(dev, engine);
+	return logical_ring_init(engine);
 }
 
 static int logical_blt_ring_init(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *engine = &dev_priv->engine[BCS];
+	struct intel_engine_cs *engine = logical_ring_setup(dev, BCS);
 
-	engine->name = "blitter ring";
-	engine->id = BCS;
-	engine->exec_id = I915_EXEC_BLT;
-	engine->guc_id = GUC_BLITTER_ENGINE;
-	engine->mmio_base = BLT_RING_BASE;
-
-	logical_ring_default_irqs(engine, GEN8_BCS_IRQ_SHIFT);
-	logical_ring_default_vfuncs(dev, engine);
-
-	return logical_ring_init(dev, engine);
+	return logical_ring_init(engine);
 }
 
 static int logical_vebox_ring_init(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *engine = &dev_priv->engine[VECS];
-
-	engine->name = "video enhancement ring";
-	engine->id = VECS;
-	engine->exec_id = I915_EXEC_VEBOX;
-	engine->guc_id = GUC_VIDEOENHANCE_ENGINE;
-	engine->mmio_base = VEBOX_RING_BASE;
+	struct intel_engine_cs *engine = logical_ring_setup(dev, VECS);
 
-	logical_ring_default_irqs(engine, GEN8_VECS_IRQ_SHIFT);
-	logical_ring_default_vfuncs(dev, engine);
-
-	return logical_ring_init(dev, engine);
+	return logical_ring_init(engine);
 }
 
 /**
@@ -2232,7 +2275,7 @@ cleanup_render_ring:
 }
 
 static u32
-make_rpcs(struct drm_device *dev)
+make_rpcs(struct drm_i915_private *dev_priv)
 {
 	u32 rpcs = 0;
 
@@ -2240,7 +2283,7 @@ make_rpcs(struct drm_device *dev)
 	 * No explicit RPCS request is needed to ensure full
 	 * slice/subslice/EU enablement prior to Gen9.
 	 */
-	if (INTEL_INFO(dev)->gen < 9)
+	if (INTEL_GEN(dev_priv) < 9)
 		return 0;
 
 	/*
@@ -2249,24 +2292,24 @@ make_rpcs(struct drm_device *dev)
 	 * must make an explicit request through RPCS for full
 	 * enablement.
 	 */
-	if (INTEL_INFO(dev)->has_slice_pg) {
+	if (INTEL_INFO(dev_priv)->has_slice_pg) {
 		rpcs |= GEN8_RPCS_S_CNT_ENABLE;
-		rpcs |= INTEL_INFO(dev)->slice_total <<
+		rpcs |= INTEL_INFO(dev_priv)->slice_total <<
 			GEN8_RPCS_S_CNT_SHIFT;
 		rpcs |= GEN8_RPCS_ENABLE;
 	}
 
-	if (INTEL_INFO(dev)->has_subslice_pg) {
+	if (INTEL_INFO(dev_priv)->has_subslice_pg) {
 		rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
-		rpcs |= INTEL_INFO(dev)->subslice_per_slice <<
+		rpcs |= INTEL_INFO(dev_priv)->subslice_per_slice <<
 			GEN8_RPCS_SS_CNT_SHIFT;
 		rpcs |= GEN8_RPCS_ENABLE;
 	}
 
-	if (INTEL_INFO(dev)->has_eu_pg) {
-		rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
+	if (INTEL_INFO(dev_priv)->has_eu_pg) {
+		rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
 			GEN8_RPCS_EU_MIN_SHIFT;
-		rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
+		rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
 			GEN8_RPCS_EU_MAX_SHIFT;
 		rpcs |= GEN8_RPCS_ENABLE;
 	}
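Note: make_rpcs() packs slice, subslice and EU counts into one register word, gating each field on the matching power-gating capability. A runnable model of the packing (the shift/enable constants are stand-ins for the GEN8_RPCS_* macros in i915_reg.h, and the SSEU counts in main() are made up):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RPCS_ENABLE        (1u << 31) /* placeholder bit positions */
#define RPCS_S_CNT_ENABLE  (1u << 18)
#define RPCS_S_CNT_SHIFT   15
#define RPCS_SS_CNT_ENABLE (1u << 11)
#define RPCS_SS_CNT_SHIFT  8
#define RPCS_EU_MAX_SHIFT  4
#define RPCS_EU_MIN_SHIFT  0

struct sseu {
	bool slice_pg, subslice_pg, eu_pg;
	uint32_t slices, subslices, eus;
};

static uint32_t make_rpcs_model(const struct sseu *s)
{
	uint32_t rpcs = 0;

	if (s->slice_pg)
		rpcs |= RPCS_S_CNT_ENABLE | (s->slices << RPCS_S_CNT_SHIFT) | RPCS_ENABLE;
	if (s->subslice_pg)
		rpcs |= RPCS_SS_CNT_ENABLE | (s->subslices << RPCS_SS_CNT_SHIFT) | RPCS_ENABLE;
	if (s->eu_pg) /* same count into both the min and max fields */
		rpcs |= (s->eus << RPCS_EU_MIN_SHIFT) | (s->eus << RPCS_EU_MAX_SHIFT) | RPCS_ENABLE;

	return rpcs;
}

int main(void)
{
	struct sseu example = { true, true, true, 1, 3, 8 }; /* made-up counts */
	printf("RPCS = 0x%08x\n", make_rpcs_model(&example));
	return 0;
}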
@@ -2278,9 +2321,9 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
 {
 	u32 indirect_ctx_offset;
 
-	switch (INTEL_INFO(engine->dev)->gen) {
+	switch (INTEL_GEN(engine->i915)) {
 	default:
-		MISSING_CASE(INTEL_INFO(engine->dev)->gen);
+		MISSING_CASE(INTEL_GEN(engine->i915));
 		/* fall through */
 	case 9:
 		indirect_ctx_offset =
@@ -2296,13 +2339,12 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
 }
 
 static int
-populate_lr_context(struct intel_context *ctx,
+populate_lr_context(struct i915_gem_context *ctx,
 		    struct drm_i915_gem_object *ctx_obj,
 		    struct intel_engine_cs *engine,
 		    struct intel_ringbuffer *ringbuf)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = ctx->i915;
 	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
 	void *vaddr;
 	u32 *reg_state;
@@ -2340,7 +2382,7 @@ populate_lr_context(struct intel_context *ctx,
 		       RING_CONTEXT_CONTROL(engine),
 		       _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
 					  CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
-					  (HAS_RESOURCE_STREAMER(dev) ?
+					  (HAS_RESOURCE_STREAMER(dev_priv) ?
 					   CTX_CTRL_RS_CTX_ENABLE : 0)));
 	ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
 		       0);
@@ -2429,7 +2471,7 @@ populate_lr_context(struct intel_context *ctx,
 	if (engine->id == RCS) {
 		reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
 		ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
-			       make_rpcs(dev));
+			       make_rpcs(dev_priv));
 	}
 
 	i915_gem_object_unpin_map(ctx_obj);
@@ -2438,39 +2480,8 @@ populate_lr_context(struct intel_context *ctx,
 }
 
 /**
- * intel_lr_context_free() - free the LRC specific bits of a context
- * @ctx: the LR context to free.
- *
- * The real context freeing is done in i915_gem_context_free: this only
- * takes care of the bits that are LRC related: the per-engine backing
- * objects and the logical ringbuffer.
- */
-void intel_lr_context_free(struct intel_context *ctx)
-{
-	int i;
-
-	for (i = I915_NUM_ENGINES; --i >= 0; ) {
-		struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
-		struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
-
-		if (!ctx_obj)
-			continue;
-
-		if (ctx == ctx->i915->kernel_context) {
-			intel_unpin_ringbuffer_obj(ringbuf);
-			i915_gem_object_ggtt_unpin(ctx_obj);
-			i915_gem_object_unpin_map(ctx_obj);
-		}
-
-		WARN_ON(ctx->engine[i].pin_count);
-		intel_ringbuffer_free(ringbuf);
-		drm_gem_object_unreference(&ctx_obj->base);
-	}
-}
-
-/**
  * intel_lr_context_size() - return the size of the context for an engine
- * @ring: which engine to find the context size for
+ * @engine: which engine to find the context size for
  *
  * Each engine may require a different amount of space for a context image,
  * so when allocating (or copying) an image, this function can be used to
@@ -2486,11 +2497,11 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
 {
 	int ret = 0;
 
-	WARN_ON(INTEL_INFO(engine->dev)->gen < 8);
+	WARN_ON(INTEL_GEN(engine->i915) < 8);
 
 	switch (engine->id) {
 	case RCS:
-		if (INTEL_INFO(engine->dev)->gen >= 9)
+		if (INTEL_GEN(engine->i915) >= 9)
 			ret = GEN9_LR_CONTEXT_RENDER_SIZE;
 		else
 			ret = GEN8_LR_CONTEXT_RENDER_SIZE;
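Note: intel_lr_context_size() only picks a per-engine byte size; the caller then rounds it to page granularity and adds one extra page shared with the GuC (LRC_PPHWSP_PN). A small stand-alone model of that sizing step, with placeholder page counts rather than the real GEN8/GEN9_LR_CONTEXT_* values:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ 4096u
#define RENDER_CTX_PAGES 20u /* placeholder sizes */
#define OTHER_CTX_PAGES   2u

static uint32_t round_up_page(uint32_t v)
{
	return (v + PAGE_SZ - 1) & ~(PAGE_SZ - 1);
}

static uint32_t lr_context_bytes(int is_render)
{
	uint32_t sz = (is_render ? RENDER_CTX_PAGES : OTHER_CTX_PAGES) * PAGE_SZ;

	/* One extra page as the sharing data between driver and GuC. */
	return round_up_page(sz) + PAGE_SZ;
}

int main(void)
{
	printf("render: %u bytes, other: %u bytes\n",
	       lr_context_bytes(1), lr_context_bytes(0));
	return 0;
}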
@@ -2507,9 +2518,9 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
 }
 
 /**
- * intel_lr_context_deferred_alloc() - create the LRC specific bits of a context
+ * execlists_context_deferred_alloc() - create the LRC specific bits of a context
  * @ctx: LR context to create.
- * @ring: engine to be used with the context.
+ * @engine: engine to be used with the context.
  *
  * This function can be called more than once, with different engines, if we plan
  * to use the context with them. The context backing objects and the ringbuffers
@@ -2519,31 +2530,29 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
  *
  * Return: non-zero on error.
  */
-
-int intel_lr_context_deferred_alloc(struct intel_context *ctx,
-				    struct intel_engine_cs *engine)
+static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
+					    struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
 	struct drm_i915_gem_object *ctx_obj;
+	struct intel_context *ce = &ctx->engine[engine->id];
 	uint32_t context_size;
 	struct intel_ringbuffer *ringbuf;
 	int ret;
 
-	WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
-	WARN_ON(ctx->engine[engine->id].state);
+	WARN_ON(ce->state);
 
 	context_size = round_up(intel_lr_context_size(engine), 4096);
 
 	/* One extra page as the sharing data between driver and GuC */
 	context_size += PAGE_SIZE * LRC_PPHWSP_PN;
 
-	ctx_obj = i915_gem_alloc_object(dev, context_size);
-	if (!ctx_obj) {
+	ctx_obj = i915_gem_object_create(ctx->i915->dev, context_size);
+	if (IS_ERR(ctx_obj)) {
 		DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
-		return -ENOMEM;
+		return PTR_ERR(ctx_obj);
 	}
 
-	ringbuf = intel_engine_create_ringbuffer(engine, 4 * PAGE_SIZE);
+	ringbuf = intel_engine_create_ringbuffer(engine, ctx->ring_size);
 	if (IS_ERR(ringbuf)) {
 		ret = PTR_ERR(ringbuf);
 		goto error_deref_obj;
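Note: switching from i915_gem_alloc_object() to i915_gem_object_create() changes the failure convention from a NULL return to an ERR_PTR-encoded errno, which is why the check becomes IS_ERR()/PTR_ERR(). A user-space re-implementation of the kernel's linux/err.h idiom, for illustration only:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Minimal re-creation of the kernel's ERR_PTR convention: the top 4095
 * pointer values are reserved to carry negative error codes. */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *object_create(size_t size)
{
	void *obj = malloc(size);
	return obj ? obj : ERR_PTR(-ENOMEM); /* error code travels in the pointer */
}

int main(void)
{
	void *obj = object_create(4096);

	if (IS_ERR(obj)) {
		fprintf(stderr, "alloc failed: %ld\n", PTR_ERR(obj));
		return 1;
	}
	free(obj);
	return 0;
}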
@@ -2555,48 +2564,29 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
 		goto error_ringbuf;
 	}
 
-	ctx->engine[engine->id].ringbuf = ringbuf;
-	ctx->engine[engine->id].state = ctx_obj;
-
-	if (ctx != ctx->i915->kernel_context && engine->init_context) {
-		struct drm_i915_gem_request *req;
-
-		req = i915_gem_request_alloc(engine, ctx);
-		if (IS_ERR(req)) {
-			ret = PTR_ERR(req);
-			DRM_ERROR("ring create req: %d\n", ret);
-			goto error_ringbuf;
-		}
+	ce->ringbuf = ringbuf;
+	ce->state = ctx_obj;
+	ce->initialised = engine->init_context == NULL;
 
-		ret = engine->init_context(req);
-		i915_add_request_no_flush(req);
-		if (ret) {
-			DRM_ERROR("ring init context: %d\n",
-				  ret);
-			goto error_ringbuf;
-		}
-	}
 	return 0;
 
 error_ringbuf:
 	intel_ringbuffer_free(ringbuf);
 error_deref_obj:
 	drm_gem_object_unreference(&ctx_obj->base);
-	ctx->engine[engine->id].ringbuf = NULL;
-	ctx->engine[engine->id].state = NULL;
+	ce->ringbuf = NULL;
+	ce->state = NULL;
 	return ret;
 }
 
 void intel_lr_context_reset(struct drm_i915_private *dev_priv,
-			    struct intel_context *ctx)
+			    struct i915_gem_context *ctx)
 {
 	struct intel_engine_cs *engine;
 
 	for_each_engine(engine, dev_priv) {
-		struct drm_i915_gem_object *ctx_obj =
-				ctx->engine[engine->id].state;
-		struct intel_ringbuffer *ringbuf =
-				ctx->engine[engine->id].ringbuf;
+		struct intel_context *ce = &ctx->engine[engine->id];
+		struct drm_i915_gem_object *ctx_obj = ce->state;
 		void *vaddr;
 		uint32_t *reg_state;
 
@@ -2615,7 +2605,7 @@ void intel_lr_context_reset(struct drm_i915_private *dev_priv,
 
 		i915_gem_object_unpin_map(ctx_obj);
 
-		ringbuf->head = 0;
-		ringbuf->tail = 0;
+		ce->ringbuf->head = 0;
+		ce->ringbuf->tail = 0;
 	}
 }
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 60a7385bc531..2b8255c19dcc 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -57,6 +57,11 @@
 #define GEN8_CSB_READ_PTR(csb_status) \
 	(((csb_status) & GEN8_CSB_READ_PTR_MASK) >> 8)
 
+enum {
+	INTEL_CONTEXT_SCHEDULE_IN = 0,
+	INTEL_CONTEXT_SCHEDULE_OUT,
+};
+
 /* Logical Rings */
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
 int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
@@ -99,30 +104,27 @@ static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
 #define LRC_PPHWSP_PN	(LRC_GUCSHR_PN + 1)
 #define LRC_STATE_PN	(LRC_PPHWSP_PN + 1)
 
-void intel_lr_context_free(struct intel_context *ctx);
+struct i915_gem_context;
+
 uint32_t intel_lr_context_size(struct intel_engine_cs *engine);
-int intel_lr_context_deferred_alloc(struct intel_context *ctx,
-				    struct intel_engine_cs *engine);
-void intel_lr_context_unpin(struct intel_context *ctx,
+void intel_lr_context_unpin(struct i915_gem_context *ctx,
 			    struct intel_engine_cs *engine);
 
 struct drm_i915_private;
 
 void intel_lr_context_reset(struct drm_i915_private *dev_priv,
-			    struct intel_context *ctx);
-uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
+			    struct i915_gem_context *ctx);
+uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
 				     struct intel_engine_cs *engine);
 
-u32 intel_execlists_ctx_id(struct intel_context *ctx,
-			   struct intel_engine_cs *engine);
-
 /* Execlists */
-int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
+int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
+				    int enable_execlists);
 struct i915_execbuffer_params;
 int intel_execlists_submission(struct i915_execbuffer_params *params,
 			       struct drm_i915_gem_execbuffer2 *args,
 			       struct list_head *vmas);
 
-void intel_execlists_retire_requests(struct intel_engine_cs *engine);
+void intel_execlists_cancel_requests(struct intel_engine_cs *engine);
 
 #endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index bc53c0dd34d0..e9082185a375 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -190,7 +190,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
 	/* Set the dithering flag on LVDS as needed, note that there is no
 	 * special lvds dither control bit on pch-split platforms, dithering is
 	 * only controlled through the PIPECONF reg. */
-	if (INTEL_INFO(dev)->gen == 4) {
+	if (IS_GEN4(dev_priv)) {
 		/* Bspec wording suggests that LVDS port dithering only exists
 		 * for 18bpp panels. */
 		if (crtc->config->dither && crtc->config->pipe_bpp == 18)
@@ -547,7 +547,6 @@ static int intel_lvds_set_property(struct drm_connector *connector,
 static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
 	.get_modes = intel_lvds_get_modes,
 	.mode_valid = intel_lvds_mode_valid,
-	.best_encoder = intel_best_encoder,
 };
 
 static const struct drm_connector_funcs intel_lvds_connector_funcs = {
@@ -556,6 +555,7 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = intel_lvds_set_property,
 	.atomic_get_property = intel_connector_atomic_get_property,
+	.early_unregister = intel_connector_unregister,
 	.destroy = intel_lvds_destroy,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -978,7 +978,7 @@ void intel_lvds_init(struct drm_device *dev)
 			   DRM_MODE_CONNECTOR_LVDS);
 
 	drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
-			 DRM_MODE_ENCODER_LVDS, NULL);
+			 DRM_MODE_ENCODER_LVDS, "LVDS");
 
 	intel_encoder->enable = intel_enable_lvds;
 	intel_encoder->pre_enable = intel_pre_enable_lvds;
@@ -992,7 +992,6 @@ void intel_lvds_init(struct drm_device *dev)
 	intel_encoder->get_hw_state = intel_lvds_get_hw_state;
 	intel_encoder->get_config = intel_lvds_get_config;
 	intel_connector->get_hw_state = intel_connector_get_hw_state;
-	intel_connector->unregister = intel_connector_unregister;
 
 	intel_connector_attach_encoder(intel_connector, intel_encoder);
 	intel_encoder->type = INTEL_OUTPUT_LVDS;
@@ -1082,6 +1081,8 @@ void intel_lvds_init(struct drm_device *dev)
 		fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
 		if (fixed_mode) {
 			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+			connector->display_info.width_mm = fixed_mode->width_mm;
+			connector->display_info.height_mm = fixed_mode->height_mm;
 			goto out;
 		}
 	}
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 6ba4bf7f2a89..3c1482b8f2f4 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -156,6 +156,16 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
156 "Platform that should have a MOCS table does not.\n"); 156 "Platform that should have a MOCS table does not.\n");
157 } 157 }
158 158
159 /* WaDisableSkipCaching:skl,bxt,kbl */
160 if (IS_GEN9(dev_priv)) {
161 int i;
162
163 for (i = 0; i < table->size; i++)
164 if (WARN_ON(table->table[i].l3cc_value &
165 (L3_ESC(1) | L3_SCC(0x7))))
166 return false;
167 }
168
159 return result; 169 return result;
160} 170}
161 171
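Note: WaDisableSkipCaching rejects any Gen9 L3CC entry whose ESC bit or SCC field is set. A stand-alone sketch of that mask test, assuming a field layout mirroring the driver's L3_ESC()/L3_SCC() macros (bit 0 enable, bits 1-3 control — an assumption, not quoted from the patch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define L3_ESC(v) ((v) << 0) /* assumed layout */
#define L3_SCC(v) ((v) << 1)

static bool l3cc_table_valid(const uint16_t *l3cc, int n)
{
	for (int i = 0; i < n; i++)
		if (l3cc[i] & (L3_ESC(1) | L3_SCC(0x7)))
			return false; /* WaDisableSkipCaching:skl,bxt,kbl */
	return true;
}

int main(void)
{
	uint16_t ok[] = { 0x10, 0x30 }, bad[] = { L3_ESC(1) };

	printf("ok=%d bad=%d\n", l3cc_table_valid(ok, 2), l3cc_table_valid(bad, 1));
	return 0;
}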
@@ -189,7 +199,7 @@ static i915_reg_t mocs_register(enum intel_engine_id ring, int index)
  */
 int intel_mocs_init_engine(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = to_i915(engine->dev);
+	struct drm_i915_private *dev_priv = engine->i915;
 	struct drm_i915_mocs_table table;
 	unsigned int index;
 
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 99e26034ae8d..f6d8a21d2c49 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -240,10 +240,11 @@ struct opregion_asle_ext {
 
 #define MAX_DSLP	1500
 
-static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
+static int swsci(struct drm_i915_private *dev_priv,
+		 u32 function, u32 parm, u32 *parm_out)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct opregion_swsci *swsci = dev_priv->opregion.swsci;
+	struct pci_dev *pdev = dev_priv->dev->pdev;
 	u32 main_function, sub_function, scic;
 	u16 swsci_val;
 	u32 dslp;
@@ -293,16 +294,16 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
 	swsci->scic = scic;
 
 	/* Ensure SCI event is selected and event trigger is cleared. */
-	pci_read_config_word(dev->pdev, SWSCI, &swsci_val);
+	pci_read_config_word(pdev, SWSCI, &swsci_val);
 	if (!(swsci_val & SWSCI_SCISEL) || (swsci_val & SWSCI_GSSCIE)) {
 		swsci_val |= SWSCI_SCISEL;
 		swsci_val &= ~SWSCI_GSSCIE;
-		pci_write_config_word(dev->pdev, SWSCI, swsci_val);
+		pci_write_config_word(pdev, SWSCI, swsci_val);
 	}
 
 	/* Use event trigger to tell bios to check the mail. */
 	swsci_val |= SWSCI_GSSCIE;
-	pci_write_config_word(dev->pdev, SWSCI, swsci_val);
+	pci_write_config_word(pdev, SWSCI, swsci_val);
 
 	/* Poll for the result. */
 #define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0)
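Note: the SWSCI trigger is a classic read-modify-write of a 16-bit PCI config register — first make sure the SCI-select bit is set and the trigger bit clear, then set the trigger to raise the event. The same bit dance on a plain variable (the SWSCI_* bit positions below are placeholders, not the hardware's):

#include <stdint.h>
#include <stdio.h>

#define SWSCI_SCISEL (1u << 15) /* placeholder bit positions */
#define SWSCI_GSSCIE (1u << 0)

int main(void)
{
	uint16_t swsci_val = 0; /* stands in for pci_read_config_word() */

	/* Ensure SCI event is selected and the trigger is cleared. */
	if (!(swsci_val & SWSCI_SCISEL) || (swsci_val & SWSCI_GSSCIE)) {
		swsci_val |= SWSCI_SCISEL;
		swsci_val &= (uint16_t)~SWSCI_GSSCIE;
	}

	/* Then set the trigger to tell the BIOS to check the mail. */
	swsci_val |= SWSCI_GSSCIE;

	printf("SWSCI = 0x%04x\n", swsci_val);
	return 0;
}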
@@ -336,13 +337,13 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
 int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
 				  bool enable)
 {
-	struct drm_device *dev = intel_encoder->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
 	u32 parm = 0;
 	u32 type = 0;
 	u32 port;
 
 	/* don't care about old stuff for now */
-	if (!HAS_DDI(dev))
+	if (!HAS_DDI(dev_priv))
 		return 0;
 
 	if (intel_encoder->type == INTEL_OUTPUT_DSI)
@@ -382,7 +383,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
 
 	parm |= type << (16 + port * 3);
 
-	return swsci(dev, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL);
+	return swsci(dev_priv, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL);
 }
 
 static const struct {
@@ -396,27 +397,28 @@ static const struct {
 	{ PCI_D3cold, 0x04 },
 };
 
-int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
+int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
+				  pci_power_t state)
 {
 	int i;
 
-	if (!HAS_DDI(dev))
+	if (!HAS_DDI(dev_priv))
 		return 0;
 
 	for (i = 0; i < ARRAY_SIZE(power_state_map); i++) {
 		if (state == power_state_map[i].pci_power_state)
-			return swsci(dev, SWSCI_SBCB_ADAPTER_POWER_STATE,
+			return swsci(dev_priv, SWSCI_SBCB_ADAPTER_POWER_STATE,
 				     power_state_map[i].parm, NULL);
 	}
 
 	return -EINVAL;
 }
 
-static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
+static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_connector *connector;
 	struct opregion_asle *asle = dev_priv->opregion.asle;
+	struct drm_device *dev = dev_priv->dev;
 
 	DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
 
@@ -449,7 +451,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
 	return 0;
 }
 
-static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
+static u32 asle_set_als_illum(struct drm_i915_private *dev_priv, u32 alsi)
 {
 	/* alsi is the current ALS reading in lux. 0 indicates below sensor
 	   range, 0xffff indicates above sensor range. 1-0xfffe are valid */
@@ -457,13 +459,13 @@ static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
 	return ASLC_ALS_ILLUM_FAILED;
 }
 
-static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
+static u32 asle_set_pwm_freq(struct drm_i915_private *dev_priv, u32 pfmb)
 {
 	DRM_DEBUG_DRIVER("PWM freq is not supported\n");
 	return ASLC_PWM_FREQ_FAILED;
 }
 
-static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
+static u32 asle_set_pfit(struct drm_i915_private *dev_priv, u32 pfit)
 {
 	/* Panel fitting is currently controlled by the X code, so this is a
 	   noop until modesetting support works fully */
@@ -471,13 +473,13 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
 	return ASLC_PFIT_FAILED;
 }
 
-static u32 asle_set_supported_rotation_angles(struct drm_device *dev, u32 srot)
+static u32 asle_set_supported_rotation_angles(struct drm_i915_private *dev_priv, u32 srot)
 {
 	DRM_DEBUG_DRIVER("SROT is not supported\n");
 	return ASLC_ROTATION_ANGLES_FAILED;
 }
 
-static u32 asle_set_button_array(struct drm_device *dev, u32 iuer)
+static u32 asle_set_button_array(struct drm_i915_private *dev_priv, u32 iuer)
 {
 	if (!iuer)
 		DRM_DEBUG_DRIVER("Button array event is not supported (nothing)\n");
@@ -495,7 +497,7 @@ static u32 asle_set_button_array(struct drm_device *dev, u32 iuer)
 	return ASLC_BUTTON_ARRAY_FAILED;
 }
 
-static u32 asle_set_convertible(struct drm_device *dev, u32 iuer)
+static u32 asle_set_convertible(struct drm_i915_private *dev_priv, u32 iuer)
 {
 	if (iuer & ASLE_IUER_CONVERTIBLE)
 		DRM_DEBUG_DRIVER("Convertible is not supported (clamshell)\n");
@@ -505,7 +507,7 @@ static u32 asle_set_convertible(struct drm_device *dev, u32 iuer)
 	return ASLC_CONVERTIBLE_FAILED;
 }
 
-static u32 asle_set_docking(struct drm_device *dev, u32 iuer)
+static u32 asle_set_docking(struct drm_i915_private *dev_priv, u32 iuer)
 {
 	if (iuer & ASLE_IUER_DOCKING)
 		DRM_DEBUG_DRIVER("Docking is not supported (docked)\n");
@@ -515,7 +517,7 @@ static u32 asle_set_docking(struct drm_device *dev, u32 iuer)
 	return ASLC_DOCKING_FAILED;
 }
 
-static u32 asle_isct_state(struct drm_device *dev)
+static u32 asle_isct_state(struct drm_i915_private *dev_priv)
 {
 	DRM_DEBUG_DRIVER("ISCT is not supported\n");
 	return ASLC_ISCT_STATE_FAILED;
@@ -527,7 +529,6 @@ static void asle_work(struct work_struct *work)
 		container_of(work, struct intel_opregion, asle_work);
 	struct drm_i915_private *dev_priv =
 		container_of(opregion, struct drm_i915_private, opregion);
-	struct drm_device *dev = dev_priv->dev;
 	struct opregion_asle *asle = dev_priv->opregion.asle;
 	u32 aslc_stat = 0;
 	u32 aslc_req;
@@ -544,40 +545,38 @@ static void asle_work(struct work_struct *work)
 	}
 
 	if (aslc_req & ASLC_SET_ALS_ILLUM)
-		aslc_stat |= asle_set_als_illum(dev, asle->alsi);
+		aslc_stat |= asle_set_als_illum(dev_priv, asle->alsi);
 
 	if (aslc_req & ASLC_SET_BACKLIGHT)
-		aslc_stat |= asle_set_backlight(dev, asle->bclp);
+		aslc_stat |= asle_set_backlight(dev_priv, asle->bclp);
 
 	if (aslc_req & ASLC_SET_PFIT)
-		aslc_stat |= asle_set_pfit(dev, asle->pfit);
+		aslc_stat |= asle_set_pfit(dev_priv, asle->pfit);
 
 	if (aslc_req & ASLC_SET_PWM_FREQ)
-		aslc_stat |= asle_set_pwm_freq(dev, asle->pfmb);
+		aslc_stat |= asle_set_pwm_freq(dev_priv, asle->pfmb);
 
 	if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES)
-		aslc_stat |= asle_set_supported_rotation_angles(dev,
+		aslc_stat |= asle_set_supported_rotation_angles(dev_priv,
 							asle->srot);
 
 	if (aslc_req & ASLC_BUTTON_ARRAY)
-		aslc_stat |= asle_set_button_array(dev, asle->iuer);
+		aslc_stat |= asle_set_button_array(dev_priv, asle->iuer);
 
 	if (aslc_req & ASLC_CONVERTIBLE_INDICATOR)
-		aslc_stat |= asle_set_convertible(dev, asle->iuer);
+		aslc_stat |= asle_set_convertible(dev_priv, asle->iuer);
 
 	if (aslc_req & ASLC_DOCKING_INDICATOR)
-		aslc_stat |= asle_set_docking(dev, asle->iuer);
+		aslc_stat |= asle_set_docking(dev_priv, asle->iuer);
 
 	if (aslc_req & ASLC_ISCT_STATE_CHANGE)
-		aslc_stat |= asle_isct_state(dev);
+		aslc_stat |= asle_isct_state(dev_priv);
 
 	asle->aslc = aslc_stat;
 }
 
-void intel_opregion_asle_intr(struct drm_device *dev)
+void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
 	if (dev_priv->opregion.asle)
 		schedule_work(&dev_priv->opregion.asle_work);
 }
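Note: asle_work() is a bitmask dispatcher — each ASLC request bit fans out to one handler, and the handlers' status codes are OR-ed into a single response word written back to asle->aslc. A condensed, runnable model with made-up request/status bits:

#include <stdint.h>
#include <stdio.h>

#define REQ_BACKLIGHT (1u << 1) /* made-up request bits */
#define REQ_PFIT      (1u << 2)
#define STAT_BACKLIGHT_FAILED (1u << 9) /* made-up status codes */
#define STAT_PFIT_FAILED      (1u << 10)

static uint32_t set_backlight(uint32_t bclp)
{
	return bclp ? 0 : STAT_BACKLIGHT_FAILED;
}

static uint32_t set_pfit(uint32_t pfit)
{
	(void)pfit;
	return STAT_PFIT_FAILED; /* unsupported, always reports failure */
}

int main(void)
{
	uint32_t req = REQ_BACKLIGHT | REQ_PFIT, stat = 0;

	if (req & REQ_BACKLIGHT)
		stat |= set_backlight(200);
	if (req & REQ_PFIT)
		stat |= set_pfit(0);

	printf("aslc status = 0x%08x\n", stat);
	return 0;
}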
@@ -658,10 +657,10 @@ static void set_did(struct intel_opregion *opregion, int i, u32 val)
 	}
 }
 
-static void intel_didl_outputs(struct drm_device *dev)
+static void intel_didl_outputs(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_opregion *opregion = &dev_priv->opregion;
+	struct pci_dev *pdev = dev_priv->dev->pdev;
 	struct drm_connector *connector;
 	acpi_handle handle;
 	struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
@@ -670,7 +669,7 @@ static void intel_didl_outputs(struct drm_device *dev)
 	u32 temp, max_outputs;
 	int i = 0;
 
-	handle = ACPI_HANDLE(&dev->pdev->dev);
+	handle = ACPI_HANDLE(&pdev->dev);
 	if (!handle || acpi_bus_get_device(handle, &acpi_dev))
 		return;
 
@@ -725,7 +724,7 @@ end:
 
 blind_set:
 	i = 0;
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+	list_for_each_entry(connector, &dev_priv->dev->mode_config.connector_list, head) {
 		int output_type = ACPI_OTHER_OUTPUT;
 		if (i >= max_outputs) {
 			DRM_DEBUG_KMS("More than %u outputs in connector list\n",
@@ -761,9 +760,8 @@ blind_set:
 	goto end;
 }
 
-static void intel_setup_cadls(struct drm_device *dev)
+static void intel_setup_cadls(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_opregion *opregion = &dev_priv->opregion;
 	int i = 0;
 	u32 disp_id;
@@ -780,17 +778,16 @@ static void intel_setup_cadls(struct drm_device *dev)
 	} while (++i < 8 && disp_id != 0);
 }
 
-void intel_opregion_init(struct drm_device *dev)
+void intel_opregion_register(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_opregion *opregion = &dev_priv->opregion;
 
 	if (!opregion->header)
 		return;
 
 	if (opregion->acpi) {
-		intel_didl_outputs(dev);
-		intel_setup_cadls(dev);
+		intel_didl_outputs(dev_priv);
+		intel_setup_cadls(dev_priv);
 
 		/* Notify BIOS we are ready to handle ACPI video ext notifs.
 		 * Right now, all the events are handled by the ACPI video module.
@@ -808,9 +805,8 @@ void intel_opregion_init(struct drm_device *dev)
 	}
 }
 
-void intel_opregion_fini(struct drm_device *dev)
+void intel_opregion_unregister(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_opregion *opregion = &dev_priv->opregion;
 
 	if (!opregion->header)
@@ -842,9 +838,8 @@ void intel_opregion_fini(struct drm_device *dev)
 	opregion->lid_state = NULL;
 }
 
-static void swsci_setup(struct drm_device *dev)
+static void swsci_setup(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_opregion *opregion = &dev_priv->opregion;
 	bool requested_callbacks = false;
 	u32 tmp;
@@ -854,7 +849,7 @@ static void swsci_setup(struct drm_device *dev)
 	opregion->swsci_sbcb_sub_functions = 1;
 
 	/* We use GBDA to ask for supported GBDA calls. */
-	if (swsci(dev, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) {
+	if (swsci(dev_priv, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) {
 		/* make the bits match the sub-function codes */
 		tmp <<= 1;
 		opregion->swsci_gbda_sub_functions |= tmp;
@@ -865,7 +860,7 @@ static void swsci_setup(struct drm_device *dev)
 	 * must not call interfaces that are not specifically requested by the
 	 * bios.
 	 */
-	if (swsci(dev, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) {
+	if (swsci(dev_priv, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) {
 		/* here, the bits already match sub-function codes */
 		opregion->swsci_sbcb_sub_functions |= tmp;
 		requested_callbacks = true;
@@ -876,7 +871,7 @@ static void swsci_setup(struct drm_device *dev)
 	 * the callback is _requested_. But we still can't call interfaces that
 	 * are not requested.
 	 */
-	if (swsci(dev, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) {
+	if (swsci(dev_priv, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) {
 		/* make the bits match the sub-function codes */
 		u32 low = tmp & 0x7ff;
 		u32 high = tmp & ~0xfff; /* bit 11 is reserved */
@@ -918,10 +913,10 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = {
 	{ }
 };
 
-int intel_opregion_setup(struct drm_device *dev)
+int intel_opregion_setup(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_opregion *opregion = &dev_priv->opregion;
+	struct pci_dev *pdev = dev_priv->dev->pdev;
 	u32 asls, mboxes;
 	char buf[sizeof(OPREGION_SIGNATURE)];
 	int err = 0;
@@ -933,7 +928,7 @@ int intel_opregion_setup(struct drm_device *dev)
 	BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100);
 	BUILD_BUG_ON(sizeof(struct opregion_asle_ext) != 0x400);
 
-	pci_read_config_dword(dev->pdev, ASLS, &asls);
+	pci_read_config_dword(pdev, ASLS, &asls);
 	DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
 	if (asls == 0) {
 		DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
@@ -965,7 +960,7 @@ int intel_opregion_setup(struct drm_device *dev)
 	if (mboxes & MBOX_SWSCI) {
 		DRM_DEBUG_DRIVER("SWSCI supported\n");
 		opregion->swsci = base + OPREGION_SWSCI_OFFSET;
-		swsci_setup(dev);
+		swsci_setup(dev_priv);
 	}
 
 	if (mboxes & MBOX_ASLE) {
@@ -1014,12 +1009,12 @@ err_out:
 }
 
 int
-intel_opregion_get_panel_type(struct drm_device *dev)
+intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
 {
 	u32 panel_details;
 	int ret;
 
-	ret = swsci(dev, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details);
+	ret = swsci(dev_priv, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details);
 	if (ret) {
 		DRM_DEBUG_KMS("Failed to get panel details from OpRegion (%d)\n",
 			      ret);
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index bd38e49f7334..eb93f90bb74d 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -168,7 +168,7 @@ struct overlay_registers {
168}; 168};
169 169
170struct intel_overlay { 170struct intel_overlay {
171 struct drm_device *dev; 171 struct drm_i915_private *i915;
172 struct intel_crtc *crtc; 172 struct intel_crtc *crtc;
173 struct drm_i915_gem_object *vid_bo; 173 struct drm_i915_gem_object *vid_bo;
174 struct drm_i915_gem_object *old_vid_bo; 174 struct drm_i915_gem_object *old_vid_bo;
@@ -190,15 +190,15 @@ struct intel_overlay {
190static struct overlay_registers __iomem * 190static struct overlay_registers __iomem *
191intel_overlay_map_regs(struct intel_overlay *overlay) 191intel_overlay_map_regs(struct intel_overlay *overlay)
192{ 192{
193 struct drm_i915_private *dev_priv = to_i915(overlay->dev); 193 struct drm_i915_private *dev_priv = overlay->i915;
194 struct i915_ggtt *ggtt = &dev_priv->ggtt;
195 struct overlay_registers __iomem *regs; 194 struct overlay_registers __iomem *regs;
196 195
197 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 196 if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
198 regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr; 197 regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
199 else 198 else
200 regs = io_mapping_map_wc(ggtt->mappable, 199 regs = io_mapping_map_wc(dev_priv->ggtt.mappable,
201 i915_gem_obj_ggtt_offset(overlay->reg_bo)); 200 overlay->flip_addr,
201 PAGE_SIZE);
202 202
203 return regs; 203 return regs;
204} 204}
@@ -206,7 +206,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
206static void intel_overlay_unmap_regs(struct intel_overlay *overlay, 206static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
207 struct overlay_registers __iomem *regs) 207 struct overlay_registers __iomem *regs)
208{ 208{
209 if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 209 if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
210 io_mapping_unmap(regs); 210 io_mapping_unmap(regs);
211} 211}
212 212
@@ -232,14 +232,13 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
232/* overlay needs to be disabled in OCMD reg */ 232/* overlay needs to be disabled in OCMD reg */
233static int intel_overlay_on(struct intel_overlay *overlay) 233static int intel_overlay_on(struct intel_overlay *overlay)
234{ 234{
235 struct drm_device *dev = overlay->dev; 235 struct drm_i915_private *dev_priv = overlay->i915;
236 struct drm_i915_private *dev_priv = dev->dev_private;
237 struct intel_engine_cs *engine = &dev_priv->engine[RCS]; 236 struct intel_engine_cs *engine = &dev_priv->engine[RCS];
238 struct drm_i915_gem_request *req; 237 struct drm_i915_gem_request *req;
239 int ret; 238 int ret;
240 239
241 WARN_ON(overlay->active); 240 WARN_ON(overlay->active);
242 WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE)); 241 WARN_ON(IS_I830(dev_priv) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
243 242
244 req = i915_gem_request_alloc(engine, NULL); 243 req = i915_gem_request_alloc(engine, NULL);
245 if (IS_ERR(req)) 244 if (IS_ERR(req))
@@ -266,8 +265,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
266static int intel_overlay_continue(struct intel_overlay *overlay, 265static int intel_overlay_continue(struct intel_overlay *overlay,
267 bool load_polyphase_filter) 266 bool load_polyphase_filter)
268{ 267{
269 struct drm_device *dev = overlay->dev; 268 struct drm_i915_private *dev_priv = overlay->i915;
270 struct drm_i915_private *dev_priv = dev->dev_private;
271 struct intel_engine_cs *engine = &dev_priv->engine[RCS]; 269 struct intel_engine_cs *engine = &dev_priv->engine[RCS];
272 struct drm_i915_gem_request *req; 270 struct drm_i915_gem_request *req;
273 u32 flip_addr = overlay->flip_addr; 271 u32 flip_addr = overlay->flip_addr;
@@ -335,8 +333,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
335/* overlay needs to be disabled in OCMD reg */ 333/* overlay needs to be disabled in OCMD reg */
336static int intel_overlay_off(struct intel_overlay *overlay) 334static int intel_overlay_off(struct intel_overlay *overlay)
337{ 335{
338 struct drm_device *dev = overlay->dev; 336 struct drm_i915_private *dev_priv = overlay->i915;
339 struct drm_i915_private *dev_priv = dev->dev_private;
340 struct intel_engine_cs *engine = &dev_priv->engine[RCS]; 337 struct intel_engine_cs *engine = &dev_priv->engine[RCS];
341 struct drm_i915_gem_request *req; 338 struct drm_i915_gem_request *req;
342 u32 flip_addr = overlay->flip_addr; 339 u32 flip_addr = overlay->flip_addr;
@@ -365,7 +362,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
365 intel_ring_emit(engine, flip_addr); 362 intel_ring_emit(engine, flip_addr);
366 intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 363 intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
367 /* turn overlay off */ 364 /* turn overlay off */
368 if (IS_I830(dev)) { 365 if (IS_I830(dev_priv)) {
369 /* Workaround: Don't disable the overlay fully, since otherwise 366 /* Workaround: Don't disable the overlay fully, since otherwise
370 * it dies on the next OVERLAY_ON cmd. */ 367 * it dies on the next OVERLAY_ON cmd. */
371 intel_ring_emit(engine, MI_NOOP); 368 intel_ring_emit(engine, MI_NOOP);
@@ -408,12 +405,11 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
408 */ 405 */
409static int intel_overlay_release_old_vid(struct intel_overlay *overlay) 406static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
410{ 407{
411 struct drm_device *dev = overlay->dev; 408 struct drm_i915_private *dev_priv = overlay->i915;
412 struct drm_i915_private *dev_priv = dev->dev_private;
413 struct intel_engine_cs *engine = &dev_priv->engine[RCS]; 409 struct intel_engine_cs *engine = &dev_priv->engine[RCS];
414 int ret; 410 int ret;
415 411
416 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 412 lockdep_assert_held(&dev_priv->dev->struct_mutex);
417 413
418 /* Only wait if there is actually an old frame to release to 414 /* Only wait if there is actually an old frame to release to
419 * guarantee forward progress. 415 * guarantee forward progress.
@@ -537,10 +533,10 @@ static int uv_vsubsampling(u32 format)
537 } 533 }
538} 534}
539 535
540static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width) 536static u32 calc_swidthsw(struct drm_i915_private *dev_priv, u32 offset, u32 width)
541{ 537{
542 u32 mask, shift, ret; 538 u32 mask, shift, ret;
543 if (IS_GEN2(dev)) { 539 if (IS_GEN2(dev_priv)) {
544 mask = 0x1f; 540 mask = 0x1f;
545 shift = 5; 541 shift = 5;
546 } else { 542 } else {
@@ -548,7 +544,7 @@ static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
548 shift = 6; 544 shift = 6;
549 } 545 }
550 ret = ((offset + width + mask) >> shift) - (offset >> shift); 546 ret = ((offset + width + mask) >> shift) - (offset >> shift);
551 if (!IS_GEN2(dev)) 547 if (!IS_GEN2(dev_priv))
552 ret <<= 1; 548 ret <<= 1;
553 ret -= 1; 549 ret -= 1;
554 return ret << 2; 550 return ret << 2;
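The SWIDTHSW computation above turns a byte offset plus source width into the hardware's word-aligned span field: round the covered range up to 32-byte (gen2) or 64-byte units, then scale into register units. A minimal userspace sketch of the same arithmetic, assuming the non-gen2 mask is 0x3f as in the full function (the sample offset and width are invented):

    #include <stdio.h>
    #include <stdint.h>

    /* Mirrors calc_swidthsw() above: count the 32/64-byte units spanned
     * by [offset, offset + width), then scale into the register field. */
    static uint32_t calc_swidthsw_demo(int is_gen2, uint32_t offset, uint32_t width)
    {
        uint32_t mask  = is_gen2 ? 0x1f : 0x3f;
        uint32_t shift = is_gen2 ? 5 : 6;
        uint32_t ret = ((offset + width + mask) >> shift) - (offset >> shift);

        if (!is_gen2)
            ret <<= 1;
        ret -= 1;
        return ret << 2;
    }

    int main(void)
    {
        /* hypothetical Y-plane offset and source width */
        printf("gen2:  0x%x\n", (unsigned)calc_swidthsw_demo(1, 0x20, 720));
        printf("gen4+: 0x%x\n", (unsigned)calc_swidthsw_demo(0, 0x20, 720));
        return 0;
    }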
@@ -741,12 +737,12 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
741 int ret, tmp_width; 737 int ret, tmp_width;
742 struct overlay_registers __iomem *regs; 738 struct overlay_registers __iomem *regs;
743 bool scale_changed = false; 739 bool scale_changed = false;
744 struct drm_device *dev = overlay->dev; 740 struct drm_i915_private *dev_priv = overlay->i915;
745 u32 swidth, swidthsw, sheight, ostride; 741 u32 swidth, swidthsw, sheight, ostride;
746 enum pipe pipe = overlay->crtc->pipe; 742 enum pipe pipe = overlay->crtc->pipe;
747 743
748 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 744 lockdep_assert_held(&dev_priv->dev->struct_mutex);
749 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); 745 WARN_ON(!drm_modeset_is_locked(&dev_priv->dev->mode_config.connection_mutex));
750 746
751 ret = intel_overlay_release_old_vid(overlay); 747 ret = intel_overlay_release_old_vid(overlay);
752 if (ret != 0) 748 if (ret != 0)
@@ -769,7 +765,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
769 goto out_unpin; 765 goto out_unpin;
770 } 766 }
771 oconfig = OCONF_CC_OUT_8BIT; 767 oconfig = OCONF_CC_OUT_8BIT;
772 if (IS_GEN4(overlay->dev)) 768 if (IS_GEN4(dev_priv))
773 oconfig |= OCONF_CSC_MODE_BT709; 769 oconfig |= OCONF_CSC_MODE_BT709;
774 oconfig |= pipe == 0 ? 770 oconfig |= pipe == 0 ?
775 OCONF_PIPE_A : OCONF_PIPE_B; 771 OCONF_PIPE_A : OCONF_PIPE_B;
@@ -796,7 +792,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
796 tmp_width = params->src_w; 792 tmp_width = params->src_w;
797 793
798 swidth = params->src_w; 794 swidth = params->src_w;
799 swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width); 795 swidthsw = calc_swidthsw(dev_priv, params->offset_Y, tmp_width);
800 sheight = params->src_h; 796 sheight = params->src_h;
801 iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y); 797 iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y);
802 ostride = params->stride_Y; 798 ostride = params->stride_Y;
@@ -806,9 +802,9 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
806 int uv_vscale = uv_vsubsampling(params->format); 802 int uv_vscale = uv_vsubsampling(params->format);
807 u32 tmp_U, tmp_V; 803 u32 tmp_U, tmp_V;
808 swidth |= (params->src_w/uv_hscale) << 16; 804 swidth |= (params->src_w/uv_hscale) << 16;
809 tmp_U = calc_swidthsw(overlay->dev, params->offset_U, 805 tmp_U = calc_swidthsw(dev_priv, params->offset_U,
810 params->src_w/uv_hscale); 806 params->src_w/uv_hscale);
811 tmp_V = calc_swidthsw(overlay->dev, params->offset_V, 807 tmp_V = calc_swidthsw(dev_priv, params->offset_V,
812 params->src_w/uv_hscale); 808 params->src_w/uv_hscale);
813 swidthsw |= max_t(u32, tmp_U, tmp_V) << 16; 809 swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
814 sheight |= (params->src_h/uv_vscale) << 16; 810 sheight |= (params->src_h/uv_vscale) << 16;
@@ -840,8 +836,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
840 overlay->old_vid_bo = overlay->vid_bo; 836 overlay->old_vid_bo = overlay->vid_bo;
841 overlay->vid_bo = new_bo; 837 overlay->vid_bo = new_bo;
842 838
843 intel_frontbuffer_flip(dev, 839 intel_frontbuffer_flip(dev_priv->dev, INTEL_FRONTBUFFER_OVERLAY(pipe));
844 INTEL_FRONTBUFFER_OVERLAY(pipe));
845 840
846 return 0; 841 return 0;
847 842
@@ -852,12 +847,12 @@ out_unpin:
852 847
853int intel_overlay_switch_off(struct intel_overlay *overlay) 848int intel_overlay_switch_off(struct intel_overlay *overlay)
854{ 849{
850 struct drm_i915_private *dev_priv = overlay->i915;
855 struct overlay_registers __iomem *regs; 851 struct overlay_registers __iomem *regs;
856 struct drm_device *dev = overlay->dev;
857 int ret; 852 int ret;
858 853
859 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 854 lockdep_assert_held(&dev_priv->dev->struct_mutex);
860 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); 855 WARN_ON(!drm_modeset_is_locked(&dev_priv->dev->mode_config.connection_mutex));
861 856
862 ret = intel_overlay_recover_from_interrupt(overlay); 857 ret = intel_overlay_recover_from_interrupt(overlay);
863 if (ret != 0) 858 if (ret != 0)
@@ -897,15 +892,14 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
897 892
898static void update_pfit_vscale_ratio(struct intel_overlay *overlay) 893static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
899{ 894{
900 struct drm_device *dev = overlay->dev; 895 struct drm_i915_private *dev_priv = overlay->i915;
901 struct drm_i915_private *dev_priv = dev->dev_private;
902 u32 pfit_control = I915_READ(PFIT_CONTROL); 896 u32 pfit_control = I915_READ(PFIT_CONTROL);
903 u32 ratio; 897 u32 ratio;
904 898
905 /* XXX: This is not the same logic as in the xorg driver, but more in 899 /* XXX: This is not the same logic as in the xorg driver, but more in
906 * line with the intel documentation for the i965 900 * line with the intel documentation for the i965
907 */ 901 */
908 if (INTEL_INFO(dev)->gen >= 4) { 902 if (INTEL_GEN(dev_priv) >= 4) {
909 /* on i965 use the PGM reg to read out the autoscaler values */ 903 /* on i965 use the PGM reg to read out the autoscaler values */
910 ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965; 904 ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
911 } else { 905 } else {
@@ -948,7 +942,7 @@ static int check_overlay_scaling(struct put_image_params *rec)
948 return 0; 942 return 0;
949} 943}
950 944
951static int check_overlay_src(struct drm_device *dev, 945static int check_overlay_src(struct drm_i915_private *dev_priv,
952 struct drm_intel_overlay_put_image *rec, 946 struct drm_intel_overlay_put_image *rec,
953 struct drm_i915_gem_object *new_bo) 947 struct drm_i915_gem_object *new_bo)
954{ 948{
@@ -959,7 +953,7 @@ static int check_overlay_src(struct drm_device *dev,
959 u32 tmp; 953 u32 tmp;
960 954
961 /* check src dimensions */ 955 /* check src dimensions */
962 if (IS_845G(dev) || IS_I830(dev)) { 956 if (IS_845G(dev_priv) || IS_I830(dev_priv)) {
963 if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY || 957 if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY ||
964 rec->src_width > IMAGE_MAX_WIDTH_LEGACY) 958 rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
965 return -EINVAL; 959 return -EINVAL;
@@ -1011,14 +1005,14 @@ static int check_overlay_src(struct drm_device *dev,
1011 return -EINVAL; 1005 return -EINVAL;
1012 1006
1013 /* stride checking */ 1007 /* stride checking */
1014 if (IS_I830(dev) || IS_845G(dev)) 1008 if (IS_I830(dev_priv) || IS_845G(dev_priv))
1015 stride_mask = 255; 1009 stride_mask = 255;
1016 else 1010 else
1017 stride_mask = 63; 1011 stride_mask = 63;
1018 1012
1019 if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask) 1013 if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
1020 return -EINVAL; 1014 return -EINVAL;
1021 if (IS_GEN4(dev) && rec->stride_Y < 512) 1015 if (IS_GEN4(dev_priv) && rec->stride_Y < 512)
1022 return -EINVAL; 1016 return -EINVAL;
1023 1017
1024 tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ? 1018 tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
@@ -1063,13 +1057,13 @@ static int check_overlay_src(struct drm_device *dev,
1063 * Return the pipe currently connected to the panel fitter, 1057 * Return the pipe currently connected to the panel fitter,
1064 * or -1 if the panel fitter is not present or not in use 1058 * or -1 if the panel fitter is not present or not in use
1065 */ 1059 */
1066static int intel_panel_fitter_pipe(struct drm_device *dev) 1060static int intel_panel_fitter_pipe(struct drm_i915_private *dev_priv)
1067{ 1061{
1068 struct drm_i915_private *dev_priv = dev->dev_private;
1069 u32 pfit_control; 1062 u32 pfit_control;
1070 1063
1071 /* i830 doesn't have a panel fitter */ 1064 /* i830 doesn't have a panel fitter */
1072 if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev))) 1065 if (INTEL_GEN(dev_priv) <= 3 &&
1066 (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
1073 return -1; 1067 return -1;
1074 1068
1075 pfit_control = I915_READ(PFIT_CONTROL); 1069 pfit_control = I915_READ(PFIT_CONTROL);
@@ -1079,15 +1073,15 @@ static int intel_panel_fitter_pipe(struct drm_device *dev)
1079 return -1; 1073 return -1;
1080 1074
1081 /* 965 can place panel fitter on either pipe */ 1075 /* 965 can place panel fitter on either pipe */
1082 if (IS_GEN4(dev)) 1076 if (IS_GEN4(dev_priv))
1083 return (pfit_control >> 29) & 0x3; 1077 return (pfit_control >> 29) & 0x3;
1084 1078
1085 /* older chips can only use pipe 1 */ 1079 /* older chips can only use pipe 1 */
1086 return 1; 1080 return 1;
1087} 1081}
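intel_panel_fitter_pipe() above reads the pipe binding straight out of PFIT_CONTROL; on gen4 the pipe select sits in bits 30:29, which is what the (pfit_control >> 29) & 0x3 extraction decodes. A small sketch of that bit extraction (the sample register value is invented for illustration):

    #include <stdio.h>
    #include <stdint.h>

    /* gen4 layout as used above: pipe select in bits 30:29 */
    static int pfit_pipe_gen4(uint32_t pfit_control)
    {
        return (pfit_control >> 29) & 0x3;
    }

    int main(void)
    {
        /* hypothetical value: fitter enabled (bit 31), pipe B selected */
        uint32_t pfit_control = (1u << 31) | (1u << 29);
        printf("panel fitter pipe: %d\n", pfit_pipe_gen4(pfit_control));
        return 0;
    }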
1088 1082
1089int intel_overlay_put_image(struct drm_device *dev, void *data, 1083int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
1090 struct drm_file *file_priv) 1084 struct drm_file *file_priv)
1091{ 1085{
1092 struct drm_intel_overlay_put_image *put_image_rec = data; 1086 struct drm_intel_overlay_put_image *put_image_rec = data;
1093 struct drm_i915_private *dev_priv = dev->dev_private; 1087 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1162,7 +1156,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1162 1156
1163 /* line too wide, i.e. one-line-mode */ 1157 /* line too wide, i.e. one-line-mode */
1164 if (mode->hdisplay > 1024 && 1158 if (mode->hdisplay > 1024 &&
1165 intel_panel_fitter_pipe(dev) == crtc->pipe) { 1159 intel_panel_fitter_pipe(dev_priv) == crtc->pipe) {
1166 overlay->pfit_active = true; 1160 overlay->pfit_active = true;
1167 update_pfit_vscale_ratio(overlay); 1161 update_pfit_vscale_ratio(overlay);
1168 } else 1162 } else
@@ -1196,7 +1190,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1196 goto out_unlock; 1190 goto out_unlock;
1197 } 1191 }
1198 1192
1199 ret = check_overlay_src(dev, put_image_rec, new_bo); 1193 ret = check_overlay_src(dev_priv, put_image_rec, new_bo);
1200 if (ret != 0) 1194 if (ret != 0)
1201 goto out_unlock; 1195 goto out_unlock;
1202 params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK; 1196 params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK;
@@ -1284,8 +1278,8 @@ static int check_gamma(struct drm_intel_overlay_attrs *attrs)
1284 return 0; 1278 return 0;
1285} 1279}
1286 1280
1287int intel_overlay_attrs(struct drm_device *dev, void *data, 1281int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
1288 struct drm_file *file_priv) 1282 struct drm_file *file_priv)
1289{ 1283{
1290 struct drm_intel_overlay_attrs *attrs = data; 1284 struct drm_intel_overlay_attrs *attrs = data;
1291 struct drm_i915_private *dev_priv = dev->dev_private; 1285 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1309,7 +1303,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
1309 attrs->contrast = overlay->contrast; 1303 attrs->contrast = overlay->contrast;
1310 attrs->saturation = overlay->saturation; 1304 attrs->saturation = overlay->saturation;
1311 1305
1312 if (!IS_GEN2(dev)) { 1306 if (!IS_GEN2(dev_priv)) {
1313 attrs->gamma0 = I915_READ(OGAMC0); 1307 attrs->gamma0 = I915_READ(OGAMC0);
1314 attrs->gamma1 = I915_READ(OGAMC1); 1308 attrs->gamma1 = I915_READ(OGAMC1);
1315 attrs->gamma2 = I915_READ(OGAMC2); 1309 attrs->gamma2 = I915_READ(OGAMC2);
@@ -1341,7 +1335,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
1341 intel_overlay_unmap_regs(overlay, regs); 1335 intel_overlay_unmap_regs(overlay, regs);
1342 1336
1343 if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) { 1337 if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
1344 if (IS_GEN2(dev)) 1338 if (IS_GEN2(dev_priv))
1345 goto out_unlock; 1339 goto out_unlock;
1346 1340
1347 if (overlay->active) { 1341 if (overlay->active) {
@@ -1371,37 +1365,36 @@ out_unlock:
1371 return ret; 1365 return ret;
1372} 1366}
1373 1367
1374void intel_setup_overlay(struct drm_device *dev) 1368void intel_setup_overlay(struct drm_i915_private *dev_priv)
1375{ 1369{
1376 struct drm_i915_private *dev_priv = dev->dev_private;
1377 struct intel_overlay *overlay; 1370 struct intel_overlay *overlay;
1378 struct drm_i915_gem_object *reg_bo; 1371 struct drm_i915_gem_object *reg_bo;
1379 struct overlay_registers __iomem *regs; 1372 struct overlay_registers __iomem *regs;
1380 int ret; 1373 int ret;
1381 1374
1382 if (!HAS_OVERLAY(dev)) 1375 if (!HAS_OVERLAY(dev_priv))
1383 return; 1376 return;
1384 1377
1385 overlay = kzalloc(sizeof(*overlay), GFP_KERNEL); 1378 overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
1386 if (!overlay) 1379 if (!overlay)
1387 return; 1380 return;
1388 1381
1389 mutex_lock(&dev->struct_mutex); 1382 mutex_lock(&dev_priv->dev->struct_mutex);
1390 if (WARN_ON(dev_priv->overlay)) 1383 if (WARN_ON(dev_priv->overlay))
1391 goto out_free; 1384 goto out_free;
1392 1385
1393 overlay->dev = dev; 1386 overlay->i915 = dev_priv;
1394 1387
1395 reg_bo = NULL; 1388 reg_bo = NULL;
1396 if (!OVERLAY_NEEDS_PHYSICAL(dev)) 1389 if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
1397 reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE); 1390 reg_bo = i915_gem_object_create_stolen(dev_priv->dev, PAGE_SIZE);
1398 if (reg_bo == NULL)
1399 reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
1400 if (reg_bo == NULL) 1391 if (reg_bo == NULL)
1392 reg_bo = i915_gem_object_create(dev_priv->dev, PAGE_SIZE);
1393 if (IS_ERR(reg_bo))
1401 goto out_free; 1394 goto out_free;
1402 overlay->reg_bo = reg_bo; 1395 overlay->reg_bo = reg_bo;
1403 1396
1404 if (OVERLAY_NEEDS_PHYSICAL(dev)) { 1397 if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) {
1405 ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE); 1398 ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE);
1406 if (ret) { 1399 if (ret) {
1407 DRM_ERROR("failed to attach phys overlay regs\n"); 1400 DRM_ERROR("failed to attach phys overlay regs\n");
@@ -1441,25 +1434,23 @@ void intel_setup_overlay(struct drm_device *dev)
1441 intel_overlay_unmap_regs(overlay, regs); 1434 intel_overlay_unmap_regs(overlay, regs);
1442 1435
1443 dev_priv->overlay = overlay; 1436 dev_priv->overlay = overlay;
1444 mutex_unlock(&dev->struct_mutex); 1437 mutex_unlock(&dev_priv->dev->struct_mutex);
1445 DRM_INFO("initialized overlay support\n"); 1438 DRM_INFO("initialized overlay support\n");
1446 return; 1439 return;
1447 1440
1448out_unpin_bo: 1441out_unpin_bo:
1449 if (!OVERLAY_NEEDS_PHYSICAL(dev)) 1442 if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
1450 i915_gem_object_ggtt_unpin(reg_bo); 1443 i915_gem_object_ggtt_unpin(reg_bo);
1451out_free_bo: 1444out_free_bo:
1452 drm_gem_object_unreference(&reg_bo->base); 1445 drm_gem_object_unreference(&reg_bo->base);
1453out_free: 1446out_free:
1454 mutex_unlock(&dev->struct_mutex); 1447 mutex_unlock(&dev_priv->dev->struct_mutex);
1455 kfree(overlay); 1448 kfree(overlay);
1456 return; 1449 return;
1457} 1450}
1458 1451
1459void intel_cleanup_overlay(struct drm_device *dev) 1452void intel_cleanup_overlay(struct drm_i915_private *dev_priv)
1460{ 1453{
1461 struct drm_i915_private *dev_priv = dev->dev_private;
1462
1463 if (!dev_priv->overlay) 1454 if (!dev_priv->overlay)
1464 return; 1455 return;
1465 1456
@@ -1482,18 +1473,17 @@ struct intel_overlay_error_state {
1482static struct overlay_registers __iomem * 1473static struct overlay_registers __iomem *
1483intel_overlay_map_regs_atomic(struct intel_overlay *overlay) 1474intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
1484{ 1475{
1485 struct drm_i915_private *dev_priv = to_i915(overlay->dev); 1476 struct drm_i915_private *dev_priv = overlay->i915;
1486 struct i915_ggtt *ggtt = &dev_priv->ggtt;
1487 struct overlay_registers __iomem *regs; 1477 struct overlay_registers __iomem *regs;
1488 1478
1489 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 1479 if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
1490 /* Cast to make sparse happy, but it's wc memory anyway, so 1480 /* Cast to make sparse happy, but it's wc memory anyway, so
1491 * equivalent to the wc io mapping on X86. */ 1481 * equivalent to the wc io mapping on X86. */
1492 regs = (struct overlay_registers __iomem *) 1482 regs = (struct overlay_registers __iomem *)
1493 overlay->reg_bo->phys_handle->vaddr; 1483 overlay->reg_bo->phys_handle->vaddr;
1494 else 1484 else
1495 regs = io_mapping_map_atomic_wc(ggtt->mappable, 1485 regs = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
1496 i915_gem_obj_ggtt_offset(overlay->reg_bo)); 1486 overlay->flip_addr);
1497 1487
1498 return regs; 1488 return regs;
1499} 1489}
@@ -1501,15 +1491,13 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
1501static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay, 1491static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
1502 struct overlay_registers __iomem *regs) 1492 struct overlay_registers __iomem *regs)
1503{ 1493{
1504 if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 1494 if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
1505 io_mapping_unmap_atomic(regs); 1495 io_mapping_unmap_atomic(regs);
1506} 1496}
1507 1497
1508
1509struct intel_overlay_error_state * 1498struct intel_overlay_error_state *
1510intel_overlay_capture_error_state(struct drm_device *dev) 1499intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
1511{ 1500{
1512 struct drm_i915_private *dev_priv = dev->dev_private;
1513 struct intel_overlay *overlay = dev_priv->overlay; 1501 struct intel_overlay *overlay = dev_priv->overlay;
1514 struct intel_overlay_error_state *error; 1502 struct intel_overlay_error_state *error;
1515 struct overlay_registers __iomem *regs; 1503 struct overlay_registers __iomem *regs;
@@ -1523,10 +1511,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
1523 1511
1524 error->dovsta = I915_READ(DOVSTA); 1512 error->dovsta = I915_READ(DOVSTA);
1525 error->isr = I915_READ(ISR); 1513 error->isr = I915_READ(ISR);
1526 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 1514 error->base = overlay->flip_addr;
1527 error->base = (__force long)overlay->reg_bo->phys_handle->vaddr;
1528 else
1529 error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
1530 1515
1531 regs = intel_overlay_map_regs_atomic(overlay); 1516 regs = intel_overlay_map_regs_atomic(overlay);
1532 if (!regs) 1517 if (!regs)
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 8357d571553a..bf721781c259 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -1216,7 +1216,7 @@ static int intel_backlight_device_register(struct intel_connector *connector)
1216 return 0; 1216 return 0;
1217} 1217}
1218 1218
1219static void intel_backlight_device_unregister(struct intel_connector *connector) 1219void intel_backlight_device_unregister(struct intel_connector *connector)
1220{ 1220{
1221 struct intel_panel *panel = &connector->panel; 1221 struct intel_panel *panel = &connector->panel;
1222 1222
@@ -1230,9 +1230,6 @@ static int intel_backlight_device_register(struct intel_connector *connector)
1230{ 1230{
1231 return 0; 1231 return 0;
1232} 1232}
1233static void intel_backlight_device_unregister(struct intel_connector *connector)
1234{
1235}
1236#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */ 1233#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
1237 1234
1238/* 1235/*
@@ -1724,6 +1721,14 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
1724 container_of(panel, struct intel_connector, panel); 1721 container_of(panel, struct intel_connector, panel);
1725 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1722 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1726 1723
1724 if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP &&
1725 intel_dp_aux_init_backlight_funcs(connector) == 0)
1726 return;
1727
1728 if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI &&
1729 intel_dsi_dcs_init_backlight_funcs(connector) == 0)
1730 return;
1731
1727 if (IS_BROXTON(dev_priv)) { 1732 if (IS_BROXTON(dev_priv)) {
1728 panel->backlight.setup = bxt_setup_backlight; 1733 panel->backlight.setup = bxt_setup_backlight;
1729 panel->backlight.enable = bxt_enable_backlight; 1734 panel->backlight.enable = bxt_enable_backlight;
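The new ordering above tries connector-specific backlight control (DP AUX, then DSI DCS) before falling through to the platform PWM setup. The shape is a first-success-wins chain of init hooks, each returning 0 when it binds. A generic sketch of that pattern (all function names here are stand-ins, not the driver's):

    #include <stdio.h>

    /* Stand-in init hooks: 0 on success, negative when not applicable. */
    static int try_aux_backlight(void) { return -1; } /* pretend unsupported */
    static int try_dcs_backlight(void) { return -1; }
    static int try_pwm_backlight(void) { return 0; }  /* platform fallback */

    int main(void)
    {
        int (*const chain[])(void) = {
            try_aux_backlight, try_dcs_backlight, try_pwm_backlight,
        };
        for (unsigned i = 0; i < sizeof(chain) / sizeof(chain[0]); i++) {
            if (chain[i]() == 0) {
                printf("backlight hooks bound by method %u\n", i);
                return 0;
            }
        }
        printf("no backlight control available\n");
        return 1;
    }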
@@ -1812,11 +1817,3 @@ void intel_backlight_register(struct drm_device *dev)
1812 for_each_intel_connector(dev, connector) 1817 for_each_intel_connector(dev, connector)
1813 intel_backlight_device_register(connector); 1818 intel_backlight_device_register(connector);
1814} 1819}
1815
1816void intel_backlight_unregister(struct drm_device *dev)
1817{
1818 struct intel_connector *connector;
1819
1820 for_each_intel_connector(dev, connector)
1821 intel_backlight_device_unregister(connector);
1822}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index a7ef45da0a9e..658a75659657 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -26,6 +26,7 @@
26 */ 26 */
27 27
28#include <linux/cpufreq.h> 28#include <linux/cpufreq.h>
29#include <drm/drm_plane_helper.h>
29#include "i915_drv.h" 30#include "i915_drv.h"
30#include "intel_drv.h" 31#include "intel_drv.h"
31#include "../../../platform/x86/intel_ips.h" 32#include "../../../platform/x86/intel_ips.h"
@@ -54,10 +55,38 @@
54#define INTEL_RC6p_ENABLE (1<<1) 55#define INTEL_RC6p_ENABLE (1<<1)
55#define INTEL_RC6pp_ENABLE (1<<2) 56#define INTEL_RC6pp_ENABLE (1<<2)
56 57
58static void gen9_init_clock_gating(struct drm_device *dev)
59{
60 struct drm_i915_private *dev_priv = dev->dev_private;
61
62 /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */
63 I915_WRITE(CHICKEN_PAR1_1,
64 I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
65
66 I915_WRITE(GEN8_CONFIG0,
67 I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);
68
69 /* WaEnableChickenDCPR:skl,bxt,kbl */
70 I915_WRITE(GEN8_CHICKEN_DCPR_1,
71 I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
72
73 /* WaFbcTurnOffFbcWatermark:skl,bxt,kbl */
74 /* WaFbcWakeMemOn:skl,bxt,kbl */
75 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
76 DISP_FBC_WM_DIS |
77 DISP_FBC_MEMORY_WAKE);
78
79 /* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl */
80 I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
81 ILK_DPFC_DISABLE_DUMMY0);
82}
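Every write in gen9_init_clock_gating() follows the same read-modify-write shape: fetch the register, OR in the workaround bits, write it back, so unrelated fields survive. A userspace sketch of that idiom against a fake register file (register indices and bit values are invented):

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t regs[4]; /* stand-in MMIO space */

    static uint32_t reg_read(int r)              { return regs[r]; }
    static void     reg_write(int r, uint32_t v) { regs[r] = v; }

    /* The workaround idiom used above: set bits, clobber nothing else. */
    static void reg_set_bits(int r, uint32_t bits)
    {
        reg_write(r, reg_read(r) | bits);
    }

    int main(void)
    {
        regs[0] = 0x00010004;          /* pre-existing configuration */
        reg_set_bits(0, 1u << 31);     /* hypothetical chicken bit */
        printf("reg0 = 0x%08x\n", regs[0]); /* 0x80010004: old bits kept */
        return 0;
    }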
83
57static void bxt_init_clock_gating(struct drm_device *dev) 84static void bxt_init_clock_gating(struct drm_device *dev)
58{ 85{
59 struct drm_i915_private *dev_priv = dev->dev_private; 86 struct drm_i915_private *dev_priv = dev->dev_private;
60 87
88 gen9_init_clock_gating(dev);
89
61 /* WaDisableSDEUnitClockGating:bxt */ 90 /* WaDisableSDEUnitClockGating:bxt */
62 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | 91 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
63 GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 92 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
@@ -2012,10 +2041,10 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
2012} 2041}
2013 2042
2014static uint32_t 2043static uint32_t
2015hsw_compute_linetime_wm(struct drm_device *dev, 2044hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
2016 struct intel_crtc_state *cstate)
2017{ 2045{
2018 struct drm_i915_private *dev_priv = dev->dev_private; 2046 const struct intel_atomic_state *intel_state =
2047 to_intel_atomic_state(cstate->base.state);
2019 const struct drm_display_mode *adjusted_mode = 2048 const struct drm_display_mode *adjusted_mode =
2020 &cstate->base.adjusted_mode; 2049 &cstate->base.adjusted_mode;
2021 u32 linetime, ips_linetime; 2050 u32 linetime, ips_linetime;
@@ -2024,7 +2053,7 @@ hsw_compute_linetime_wm(struct drm_device *dev,
2024 return 0; 2053 return 0;
2025 if (WARN_ON(adjusted_mode->crtc_clock == 0)) 2054 if (WARN_ON(adjusted_mode->crtc_clock == 0))
2026 return 0; 2055 return 0;
2027 if (WARN_ON(dev_priv->cdclk_freq == 0)) 2056 if (WARN_ON(intel_state->cdclk == 0))
2028 return 0; 2057 return 0;
2029 2058
2030 /* The WMs are computed based on how long it takes to fill a single 2059
@@ -2033,7 +2062,7 @@ hsw_compute_linetime_wm(struct drm_device *dev,
2033 linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8, 2062 linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
2034 adjusted_mode->crtc_clock); 2063 adjusted_mode->crtc_clock);
2035 ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8, 2064 ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
2036 dev_priv->cdclk_freq); 2065 intel_state->cdclk);
2037 2066
2038 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) | 2067 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
2039 PIPE_WM_LINETIME_TIME(linetime); 2068 PIPE_WM_LINETIME_TIME(linetime);
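The two divisions above express how long it takes to scan one line, in eighths of a microsecond: once against the pixel clock, and once against CDCLK for the IPS case (now taken from the atomic state rather than dev_priv). Plugging in a hypothetical 1080p-class mode (htotal 2200, pixel clock 148500 kHz, CDCLK 450000 kHz; all values invented for illustration):

    #include <stdio.h>
    #include <stdint.h>

    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

    int main(void)
    {
        uint32_t htotal = 2200;       /* hypothetical crtc_htotal */
        uint32_t crtc_clock = 148500; /* kHz */
        uint32_t cdclk = 450000;      /* kHz */

        /* time to fill one line, in 0.125 us units, as computed above */
        uint32_t linetime = DIV_ROUND_CLOSEST(htotal * 1000 * 8, crtc_clock);
        uint32_t ips_linetime = DIV_ROUND_CLOSEST(htotal * 1000 * 8, cdclk);

        /* 17600000/148500 -> 119, 17600000/450000 -> 39 */
        printf("linetime=%u ips_linetime=%u\n", linetime, ips_linetime);
        return 0;
    }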
@@ -2146,14 +2175,14 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
2146static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5]) 2175static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
2147{ 2176{
2148 /* ILK sprite LP0 latency is 1300 ns */ 2177 /* ILK sprite LP0 latency is 1300 ns */
2149 if (INTEL_INFO(dev)->gen == 5) 2178 if (IS_GEN5(dev))
2150 wm[0] = 13; 2179 wm[0] = 13;
2151} 2180}
2152 2181
2153static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5]) 2182static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
2154{ 2183{
2155 /* ILK cursor LP0 latency is 1300 ns */ 2184 /* ILK cursor LP0 latency is 1300 ns */
2156 if (INTEL_INFO(dev)->gen == 5) 2185 if (IS_GEN5(dev))
2157 wm[0] = 13; 2186 wm[0] = 13;
2158 2187
2159 /* WaDoubleCursorLP3Latency:ivb */ 2188 /* WaDoubleCursorLP3Latency:ivb */
@@ -2309,7 +2338,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
2309 int level, max_level = ilk_wm_max_level(dev), usable_level; 2338 int level, max_level = ilk_wm_max_level(dev), usable_level;
2310 struct ilk_wm_maximums max; 2339 struct ilk_wm_maximums max;
2311 2340
2312 pipe_wm = &cstate->wm.optimal.ilk; 2341 pipe_wm = &cstate->wm.ilk.optimal;
2313 2342
2314 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 2343 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2315 struct intel_plane_state *ps; 2344 struct intel_plane_state *ps;
@@ -2352,7 +2381,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
2352 pipe_wm->wm[0] = pipe_wm->raw_wm[0]; 2381 pipe_wm->wm[0] = pipe_wm->raw_wm[0];
2353 2382
2354 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 2383 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2355 pipe_wm->linetime = hsw_compute_linetime_wm(dev, cstate); 2384 pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
2356 2385
2357 if (!ilk_validate_pipe_wm(dev, pipe_wm)) 2386 if (!ilk_validate_pipe_wm(dev, pipe_wm))
2358 return -EINVAL; 2387 return -EINVAL;
@@ -2391,7 +2420,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
2391 struct intel_crtc *intel_crtc, 2420 struct intel_crtc *intel_crtc,
2392 struct intel_crtc_state *newstate) 2421 struct intel_crtc_state *newstate)
2393{ 2422{
2394 struct intel_pipe_wm *a = &newstate->wm.intermediate; 2423 struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
2395 struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk; 2424 struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk;
2396 int level, max_level = ilk_wm_max_level(dev); 2425 int level, max_level = ilk_wm_max_level(dev);
2397 2426
@@ -2400,7 +2429,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
2400 * currently active watermarks to get values that are safe both before 2429 * currently active watermarks to get values that are safe both before
2401 * and after the vblank. 2430 * and after the vblank.
2402 */ 2431 */
2403 *a = newstate->wm.optimal.ilk; 2432 *a = newstate->wm.ilk.optimal;
2404 a->pipe_enabled |= b->pipe_enabled; 2433 a->pipe_enabled |= b->pipe_enabled;
2405 a->sprites_enabled |= b->sprites_enabled; 2434 a->sprites_enabled |= b->sprites_enabled;
2406 a->sprites_scaled |= b->sprites_scaled; 2435 a->sprites_scaled |= b->sprites_scaled;
@@ -2429,7 +2458,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
2429 * If our intermediate WM are identical to the final WM, then we can 2458 * If our intermediate WM are identical to the final WM, then we can
2430 * omit the post-vblank programming; only update if it's different. 2459 * omit the post-vblank programming; only update if it's different.
2431 */ 2460 */
2432 if (memcmp(a, &newstate->wm.optimal.ilk, sizeof(*a)) == 0) 2461 if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) == 0)
2433 newstate->wm.need_postvbl_update = false; 2462 newstate->wm.need_postvbl_update = false;
2434 2463
2435 return 0; 2464 return 0;
@@ -2849,20 +2878,29 @@ skl_wm_plane_id(const struct intel_plane *plane)
2849static void 2878static void
2850skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, 2879skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
2851 const struct intel_crtc_state *cstate, 2880 const struct intel_crtc_state *cstate,
2852 const struct intel_wm_config *config, 2881 struct skl_ddb_entry *alloc, /* out */
2853 struct skl_ddb_entry *alloc /* out */) 2882 int *num_active /* out */)
2854{ 2883{
2884 struct drm_atomic_state *state = cstate->base.state;
2885 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
2886 struct drm_i915_private *dev_priv = to_i915(dev);
2855 struct drm_crtc *for_crtc = cstate->base.crtc; 2887 struct drm_crtc *for_crtc = cstate->base.crtc;
2856 struct drm_crtc *crtc;
2857 unsigned int pipe_size, ddb_size; 2888 unsigned int pipe_size, ddb_size;
2858 int nth_active_pipe; 2889 int nth_active_pipe;
2890 int pipe = to_intel_crtc(for_crtc)->pipe;
2859 2891
2860 if (!cstate->base.active) { 2892 if (WARN_ON(!state) || !cstate->base.active) {
2861 alloc->start = 0; 2893 alloc->start = 0;
2862 alloc->end = 0; 2894 alloc->end = 0;
2895 *num_active = hweight32(dev_priv->active_crtcs);
2863 return; 2896 return;
2864 } 2897 }
2865 2898
2899 if (intel_state->active_pipe_changes)
2900 *num_active = hweight32(intel_state->active_crtcs);
2901 else
2902 *num_active = hweight32(dev_priv->active_crtcs);
2903
2866 if (IS_BROXTON(dev)) 2904 if (IS_BROXTON(dev))
2867 ddb_size = BXT_DDB_SIZE; 2905 ddb_size = BXT_DDB_SIZE;
2868 else 2906 else
@@ -2870,25 +2908,29 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
2870 2908
2871 ddb_size -= 4; /* 4 blocks for bypass path allocation */ 2909 ddb_size -= 4; /* 4 blocks for bypass path allocation */
2872 2910
2873 nth_active_pipe = 0; 2911 /*
2874 for_each_crtc(dev, crtc) { 2912 * If the state doesn't change the active CRTCs, then there's
2875 if (!to_intel_crtc(crtc)->active) 2913 * no need to recalculate; the existing pipe allocation limits
2876 continue; 2914 * should remain unchanged. Note that we're safe from racing
2877 2915 * commits since any racing commit that changes the active CRTC
2878 if (crtc == for_crtc) 2916 * list would need to grab _all_ crtc locks, including the one
2879 break; 2917 * we currently hold.
2880 2918 */
2881 nth_active_pipe++; 2919 if (!intel_state->active_pipe_changes) {
2920 *alloc = dev_priv->wm.skl_hw.ddb.pipe[pipe];
2921 return;
2882 } 2922 }
2883 2923
2884 pipe_size = ddb_size / config->num_pipes_active; 2924 nth_active_pipe = hweight32(intel_state->active_crtcs &
2885 alloc->start = nth_active_pipe * ddb_size / config->num_pipes_active; 2925 (drm_crtc_mask(for_crtc) - 1));
2926 pipe_size = ddb_size / hweight32(intel_state->active_crtcs);
2927 alloc->start = nth_active_pipe * ddb_size / *num_active;
2886 alloc->end = alloc->start + pipe_size; 2928 alloc->end = alloc->start + pipe_size;
2887} 2929}
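With the atomic rework above, the pipe's DDB slice becomes a straight proportional carve-up: the global buffer minus the 4 bypass blocks, divided by the number of active pipes, indexed by how many active pipes precede this one in the mask. A sketch with hypothetical sizes (896 blocks is an assumption for the SKL DDB; the active-CRTC mask is invented):

    #include <stdio.h>
    #include <stdint.h>

    static int hweight32(uint32_t v)
    {
        int n = 0;
        for (; v; v &= v - 1)
            n++;
        return n;
    }

    int main(void)
    {
        uint32_t ddb_size = 896 - 4;  /* assumed SKL size minus bypass blocks */
        uint32_t active_crtcs = 0x5;  /* hypothetical: pipes A and C active */
        uint32_t for_crtc_mask = 0x4; /* computing pipe C's slice */

        int num_active = hweight32(active_crtcs);
        /* how many active pipes sit below this one in the mask */
        int nth = hweight32(active_crtcs & (for_crtc_mask - 1));
        uint32_t pipe_size = ddb_size / num_active;
        uint32_t start = nth * ddb_size / num_active;

        printf("pipe slice: [%u, %u)\n", start, start + pipe_size);
        return 0;
    }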
2888 2930
2889static unsigned int skl_cursor_allocation(const struct intel_wm_config *config) 2931static unsigned int skl_cursor_allocation(int num_active)
2890{ 2932{
2891 if (config->num_pipes_active == 1) 2933 if (num_active == 1)
2892 return 32; 2934 return 32;
2893 2935
2894 return 8; 2936 return 8;
@@ -2932,6 +2974,46 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
2932 } 2974 }
2933} 2975}
2934 2976
2977/*
2978 * Determines the downscale amount of a plane for the purposes of watermark calculations.
2979 * The bspec defines downscale amount as:
2980 *
2981 * """
2982 * Horizontal down scale amount = maximum[1, Horizontal source size /
2983 * Horizontal destination size]
2984 * Vertical down scale amount = maximum[1, Vertical source size /
2985 * Vertical destination size]
2986 * Total down scale amount = Horizontal down scale amount *
2987 * Vertical down scale amount
2988 * """
2989 *
2990 * Return value is provided in 16.16 fixed point form to retain the fractional part.
2991 * Caller should take care of dividing & rounding off the value.
2992 */
2993static uint32_t
2994skl_plane_downscale_amount(const struct intel_plane_state *pstate)
2995{
2996 uint32_t downscale_h, downscale_w;
2997 uint32_t src_w, src_h, dst_w, dst_h;
2998
2999 if (WARN_ON(!pstate->visible))
3000 return DRM_PLANE_HELPER_NO_SCALING;
3001
3002 /* n.b., src is 16.16 fixed point, dst is whole integer */
3003 src_w = drm_rect_width(&pstate->src);
3004 src_h = drm_rect_height(&pstate->src);
3005 dst_w = drm_rect_width(&pstate->dst);
3006 dst_h = drm_rect_height(&pstate->dst);
3007 if (intel_rotation_90_or_270(pstate->base.rotation))
3008 swap(dst_w, dst_h);
3009
3010 downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
3011 downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
3012
3013 /* Provide result in 16.16 fixed point */
3014 return (uint64_t)downscale_w * downscale_h >> 16;
3015}
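The 16.16 fixed-point form above keeps the fractional part of both ratios through the final multiply: the product of two 16.16 values is 32.32, and the >>16 brings it back to 16.16. A worked example for a hypothetical 3840x2160 source scanned out at 1920x1080 (the source rectangle is already in 16.16, as noted above):

    #include <stdio.h>
    #include <stdint.h>

    #define NO_SCALING (1u << 16) /* 1.0 in 16.16, as in DRM_PLANE_HELPER_NO_SCALING */

    int main(void)
    {
        uint32_t src_w = 3840u << 16, src_h = 2160u << 16; /* 16.16 source size */
        uint32_t dst_w = 1920, dst_h = 1080;               /* integer dest size */

        /* 16.16 / integer stays 16.16; clamp to at least 1.0 as above */
        uint32_t down_w = src_w / dst_w;
        uint32_t down_h = src_h / dst_h;
        if (down_w < NO_SCALING) down_w = NO_SCALING;
        if (down_h < NO_SCALING) down_h = NO_SCALING;

        /* 16.16 * 16.16 = 32.32; shift back down to 16.16 */
        uint32_t total = (uint64_t)down_w * down_h >> 16;

        printf("total downscale = %u.%04u\n",
               (unsigned)(total >> 16),
               (unsigned)((total & 0xffff) * 10000 / 65536));
        /* 2.0 * 2.0 = 4.0 -> prints 4.0000 */
        return 0;
    }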
3016
2935static unsigned int 3017static unsigned int
2936skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, 3018skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
2937 const struct drm_plane_state *pstate, 3019 const struct drm_plane_state *pstate,
@@ -2939,7 +3021,16 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
2939{ 3021{
2940 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate); 3022 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
2941 struct drm_framebuffer *fb = pstate->fb; 3023 struct drm_framebuffer *fb = pstate->fb;
3024 uint32_t down_scale_amount, data_rate;
2942 uint32_t width = 0, height = 0; 3025 uint32_t width = 0, height = 0;
3026 unsigned format = fb ? fb->pixel_format : DRM_FORMAT_XRGB8888;
3027
3028 if (!intel_pstate->visible)
3029 return 0;
3030 if (pstate->plane->type == DRM_PLANE_TYPE_CURSOR)
3031 return 0;
3032 if (y && format != DRM_FORMAT_NV12)
3033 return 0;
2943 3034
2944 width = drm_rect_width(&intel_pstate->src) >> 16; 3035 width = drm_rect_width(&intel_pstate->src) >> 16;
2945 height = drm_rect_height(&intel_pstate->src) >> 16; 3036 height = drm_rect_height(&intel_pstate->src) >> 16;
@@ -2948,17 +3039,21 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
2948 swap(width, height); 3039 swap(width, height);
2949 3040
2950 /* for planar format */ 3041 /* for planar format */
2951 if (fb->pixel_format == DRM_FORMAT_NV12) { 3042 if (format == DRM_FORMAT_NV12) {
2952 if (y) /* y-plane data rate */ 3043 if (y) /* y-plane data rate */
2953 return width * height * 3044 data_rate = width * height *
2954 drm_format_plane_cpp(fb->pixel_format, 0); 3045 drm_format_plane_cpp(format, 0);
2955 else /* uv-plane data rate */ 3046 else /* uv-plane data rate */
2956 return (width / 2) * (height / 2) * 3047 data_rate = (width / 2) * (height / 2) *
2957 drm_format_plane_cpp(fb->pixel_format, 1); 3048 drm_format_plane_cpp(format, 1);
3049 } else {
3050 /* for packed formats */
3051 data_rate = width * height * drm_format_plane_cpp(format, 0);
2958 } 3052 }
2959 3053
2960 /* for packed formats */ 3054 down_scale_amount = skl_plane_downscale_amount(intel_pstate);
2961 return width * height * drm_format_plane_cpp(fb->pixel_format, 0); 3055
3056 return (uint64_t)data_rate * down_scale_amount >> 16;
2962} 3057}
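For NV12 the two passes above price the Y and UV planes separately: full resolution at 1 byte per pixel for Y, quarter resolution at 2 bytes per pixel for UV, each then derated by the 16.16 downscale amount. A quick check of those byte counts for a hypothetical 1920x1080 NV12 plane with no scaling:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t w = 1920, h = 1080;
        uint32_t downscale = 1u << 16; /* 1.0 in 16.16: no scaling */

        uint32_t y_rate  = w * h * 1;             /* Y: 1 byte/pixel */
        uint32_t uv_rate = (w / 2) * (h / 2) * 2; /* UV: half res, 2 bytes/pixel */

        /* derate by the 16.16 downscale amount, as above */
        uint64_t y_total  = (uint64_t)y_rate * downscale >> 16;
        uint64_t uv_total = (uint64_t)uv_rate * downscale >> 16;

        printf("y=%llu uv=%llu bytes per frame\n",
               (unsigned long long)y_total, (unsigned long long)uv_total);
        /* 2073600 and 1036800: UV carries half the Y data rate */
        return 0;
    }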
2963 3058
2964/* 3059/*
@@ -2967,86 +3062,188 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
2967 * 3 * 4096 * 8192 * 4 < 2^32 3062 * 3 * 4096 * 8192 * 4 < 2^32
2968 */ 3063 */
2969static unsigned int 3064static unsigned int
2970skl_get_total_relative_data_rate(const struct intel_crtc_state *cstate) 3065skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate)
2971{ 3066{
2972 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); 3067 struct drm_crtc_state *cstate = &intel_cstate->base;
2973 struct drm_device *dev = intel_crtc->base.dev; 3068 struct drm_atomic_state *state = cstate->state;
3069 struct drm_crtc *crtc = cstate->crtc;
3070 struct drm_device *dev = crtc->dev;
3071 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3072 const struct drm_plane *plane;
2974 const struct intel_plane *intel_plane; 3073 const struct intel_plane *intel_plane;
2975 unsigned int total_data_rate = 0; 3074 struct drm_plane_state *pstate;
3075 unsigned int rate, total_data_rate = 0;
3076 int id;
3077 int i;
2976 3078
2977 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 3079 if (WARN_ON(!state))
2978 const struct drm_plane_state *pstate = intel_plane->base.state; 3080 return 0;
2979 3081
2980 if (pstate->fb == NULL) 3082 /* Calculate and cache data rate for each plane */
2981 continue; 3083 for_each_plane_in_state(state, plane, pstate, i) {
3084 id = skl_wm_plane_id(to_intel_plane(plane));
3085 intel_plane = to_intel_plane(plane);
2982 3086
2983 if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR) 3087 if (intel_plane->pipe != intel_crtc->pipe)
2984 continue; 3088 continue;
2985 3089
2986 /* packed/uv */ 3090 /* packed/uv */
2987 total_data_rate += skl_plane_relative_data_rate(cstate, 3091 rate = skl_plane_relative_data_rate(intel_cstate,
2988 pstate, 3092 pstate, 0);
2989 0); 3093 intel_cstate->wm.skl.plane_data_rate[id] = rate;
3094
3095 /* y-plane */
3096 rate = skl_plane_relative_data_rate(intel_cstate,
3097 pstate, 1);
3098 intel_cstate->wm.skl.plane_y_data_rate[id] = rate;
3099 }
2990 3100
2991 if (pstate->fb->pixel_format == DRM_FORMAT_NV12) 3101 /* Calculate CRTC's total data rate from cached values */
2992 /* y-plane */ 3102 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2993 total_data_rate += skl_plane_relative_data_rate(cstate, 3103 int id = skl_wm_plane_id(intel_plane);
2994 pstate, 3104
2995 1); 3105 /* packed/uv */
3106 total_data_rate += intel_cstate->wm.skl.plane_data_rate[id];
3107 total_data_rate += intel_cstate->wm.skl.plane_y_data_rate[id];
2996 } 3108 }
2997 3109
3110 WARN_ON(cstate->plane_mask && total_data_rate == 0);
3111
2998 return total_data_rate; 3112 return total_data_rate;
2999} 3113}
3000 3114
3001static void 3115static uint16_t
3116skl_ddb_min_alloc(const struct drm_plane_state *pstate,
3117 const int y)
3118{
3119 struct drm_framebuffer *fb = pstate->fb;
3120 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
3121 uint32_t src_w, src_h;
3122 uint32_t min_scanlines = 8;
3123 uint8_t plane_bpp;
3124
3125 if (WARN_ON(!fb))
3126 return 0;
3127
3128 /* For packed formats, no y-plane, return 0 */
3129 if (y && fb->pixel_format != DRM_FORMAT_NV12)
3130 return 0;
3131
3132 /* For Non Y-tile return 8-blocks */
3133 if (fb->modifier[0] != I915_FORMAT_MOD_Y_TILED &&
3134 fb->modifier[0] != I915_FORMAT_MOD_Yf_TILED)
3135 return 8;
3136
3137 src_w = drm_rect_width(&intel_pstate->src) >> 16;
3138 src_h = drm_rect_height(&intel_pstate->src) >> 16;
3139
3140 if (intel_rotation_90_or_270(pstate->rotation))
3141 swap(src_w, src_h);
3142
3143 /* Halve UV plane width and height for NV12 */
3144 if (fb->pixel_format == DRM_FORMAT_NV12 && !y) {
3145 src_w /= 2;
3146 src_h /= 2;
3147 }
3148
3149 if (fb->pixel_format == DRM_FORMAT_NV12 && !y)
3150 plane_bpp = drm_format_plane_cpp(fb->pixel_format, 1);
3151 else
3152 plane_bpp = drm_format_plane_cpp(fb->pixel_format, 0);
3153
3154 if (intel_rotation_90_or_270(pstate->rotation)) {
3155 switch (plane_bpp) {
3156 case 1:
3157 min_scanlines = 32;
3158 break;
3159 case 2:
3160 min_scanlines = 16;
3161 break;
3162 case 4:
3163 min_scanlines = 8;
3164 break;
3165 case 8:
3166 min_scanlines = 4;
3167 break;
3168 default:
3169 WARN(1, "Unsupported pixel depth %u for rotation",
3170 plane_bpp);
3171 min_scanlines = 32;
3172 }
3173 }
3174
3175 return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3;
3176}
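skl_ddb_min_alloc() above sizes the per-plane floor: 8 blocks for non-Y-tiled surfaces, and for Y-tiled planes a scanline-based minimum whose scanline count shrinks as bytes per pixel grow when the plane is rotated 90/270. A sketch of the Y-tiled rotated branch (sample width and bpp are invented):

    #include <stdio.h>
    #include <stdint.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Mirrors the Y-tiled branch above for a 90/270-rotated plane. */
    static uint16_t min_alloc_ytiled_rotated(uint32_t src_w, uint32_t bpp)
    {
        uint32_t min_scanlines;

        switch (bpp) {
        case 1: min_scanlines = 32; break;
        case 2: min_scanlines = 16; break;
        case 4: min_scanlines = 8;  break;
        case 8: min_scanlines = 4;  break;
        default: min_scanlines = 32; break; /* unsupported depth */
        }
        return DIV_ROUND_UP(4 * src_w * bpp, 512) * min_scanlines / 4 + 3;
    }

    int main(void)
    {
        /* hypothetical 1920-wide 32bpp plane */
        printf("min blocks = %u\n", min_alloc_ytiled_rotated(1920, 4));
        /* DIV_ROUND_UP(30720, 512) = 60; 60*8/4 + 3 = 123 */
        return 0;
    }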
3177
3178static int
3002skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, 3179skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
3003 struct skl_ddb_allocation *ddb /* out */) 3180 struct skl_ddb_allocation *ddb /* out */)
3004{ 3181{
3182 struct drm_atomic_state *state = cstate->base.state;
3005 struct drm_crtc *crtc = cstate->base.crtc; 3183 struct drm_crtc *crtc = cstate->base.crtc;
3006 struct drm_device *dev = crtc->dev; 3184 struct drm_device *dev = crtc->dev;
3007 struct drm_i915_private *dev_priv = to_i915(dev);
3008 struct intel_wm_config *config = &dev_priv->wm.config;
3009 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3185 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3010 struct intel_plane *intel_plane; 3186 struct intel_plane *intel_plane;
3187 struct drm_plane *plane;
3188 struct drm_plane_state *pstate;
3011 enum pipe pipe = intel_crtc->pipe; 3189 enum pipe pipe = intel_crtc->pipe;
3012 struct skl_ddb_entry *alloc = &ddb->pipe[pipe]; 3190 struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
3013 uint16_t alloc_size, start, cursor_blocks; 3191 uint16_t alloc_size, start, cursor_blocks;
3014 uint16_t minimum[I915_MAX_PLANES]; 3192 uint16_t *minimum = cstate->wm.skl.minimum_blocks;
3015 uint16_t y_minimum[I915_MAX_PLANES]; 3193 uint16_t *y_minimum = cstate->wm.skl.minimum_y_blocks;
3016 unsigned int total_data_rate; 3194 unsigned int total_data_rate;
3195 int num_active;
3196 int id, i;
3197
3198 if (WARN_ON(!state))
3199 return 0;
3017 3200
3018 skl_ddb_get_pipe_allocation_limits(dev, cstate, config, alloc); 3201 if (!cstate->base.active) {
3202 ddb->pipe[pipe].start = ddb->pipe[pipe].end = 0;
3203 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
3204 memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
3205 return 0;
3206 }
3207
3208 skl_ddb_get_pipe_allocation_limits(dev, cstate, alloc, &num_active);
3019 alloc_size = skl_ddb_entry_size(alloc); 3209 alloc_size = skl_ddb_entry_size(alloc);
3020 if (alloc_size == 0) { 3210 if (alloc_size == 0) {
3021 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe])); 3211 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
3022 memset(&ddb->plane[pipe][PLANE_CURSOR], 0, 3212 return 0;
3023 sizeof(ddb->plane[pipe][PLANE_CURSOR]));
3024 return;
3025 } 3213 }
3026 3214
3027 cursor_blocks = skl_cursor_allocation(config); 3215 cursor_blocks = skl_cursor_allocation(num_active);
3028 ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - cursor_blocks; 3216 ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - cursor_blocks;
3029 ddb->plane[pipe][PLANE_CURSOR].end = alloc->end; 3217 ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
3030 3218
3031 alloc_size -= cursor_blocks; 3219 alloc_size -= cursor_blocks;
3032 alloc->end -= cursor_blocks;
3033 3220
3034 /* 1. Allocate the minimum required blocks for each active plane */ 3221 /* 1. Allocate the minimum required blocks for each active plane */
3035 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 3222 for_each_plane_in_state(state, plane, pstate, i) {
3036 struct drm_plane *plane = &intel_plane->base; 3223 intel_plane = to_intel_plane(plane);
3037 struct drm_framebuffer *fb = plane->state->fb; 3224 id = skl_wm_plane_id(intel_plane);
3038 int id = skl_wm_plane_id(intel_plane);
3039 3225
3040 if (!to_intel_plane_state(plane->state)->visible) 3226 if (intel_plane->pipe != pipe)
3041 continue; 3227 continue;
3042 3228
3043 if (plane->type == DRM_PLANE_TYPE_CURSOR) 3229 if (!to_intel_plane_state(pstate)->visible) {
3230 minimum[id] = 0;
3231 y_minimum[id] = 0;
3232 continue;
3233 }
3234 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
3235 minimum[id] = 0;
3236 y_minimum[id] = 0;
3044 continue; 3237 continue;
3238 }
3045 3239
3046 minimum[id] = 8; 3240 minimum[id] = skl_ddb_min_alloc(pstate, 0);
3047 alloc_size -= minimum[id]; 3241 y_minimum[id] = skl_ddb_min_alloc(pstate, 1);
3048 y_minimum[id] = (fb->pixel_format == DRM_FORMAT_NV12) ? 8 : 0; 3242 }
3049 alloc_size -= y_minimum[id]; 3243
3244 for (i = 0; i < PLANE_CURSOR; i++) {
3245 alloc_size -= minimum[i];
3246 alloc_size -= y_minimum[i];
3050 } 3247 }
3051 3248
3052 /* 3249 /*
@@ -3056,21 +3253,16 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
3056 * FIXME: we may not allocate every single block here. 3253 * FIXME: we may not allocate every single block here.
3057 */ 3254 */
3058 total_data_rate = skl_get_total_relative_data_rate(cstate); 3255 total_data_rate = skl_get_total_relative_data_rate(cstate);
3256 if (total_data_rate == 0)
3257 return 0;
3059 3258
3060 start = alloc->start; 3259 start = alloc->start;
3061 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 3260 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3062 struct drm_plane *plane = &intel_plane->base;
3063 struct drm_plane_state *pstate = intel_plane->base.state;
3064 unsigned int data_rate, y_data_rate; 3261 unsigned int data_rate, y_data_rate;
3065 uint16_t plane_blocks, y_plane_blocks = 0; 3262 uint16_t plane_blocks, y_plane_blocks = 0;
3066 int id = skl_wm_plane_id(intel_plane); 3263 int id = skl_wm_plane_id(intel_plane);
3067 3264
3068 if (!to_intel_plane_state(pstate)->visible) 3265 data_rate = cstate->wm.skl.plane_data_rate[id];
3069 continue;
3070 if (plane->type == DRM_PLANE_TYPE_CURSOR)
3071 continue;
3072
3073 data_rate = skl_plane_relative_data_rate(cstate, pstate, 0);
3074 3266
3075 /* 3267 /*
3076 * allocation for (packed formats) or (uv-plane part of planar format): 3268 * allocation for (packed formats) or (uv-plane part of planar format):
@@ -3081,30 +3273,32 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
3081 plane_blocks += div_u64((uint64_t)alloc_size * data_rate, 3273 plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
3082 total_data_rate); 3274 total_data_rate);
3083 3275
3084 ddb->plane[pipe][id].start = start; 3276 /* Leave disabled planes at (0,0) */
3085 ddb->plane[pipe][id].end = start + plane_blocks; 3277 if (data_rate) {
3278 ddb->plane[pipe][id].start = start;
3279 ddb->plane[pipe][id].end = start + plane_blocks;
3280 }
3086 3281
3087 start += plane_blocks; 3282 start += plane_blocks;
3088 3283
3089 /* 3284 /*
3090 * allocation for y_plane part of planar format: 3285 * allocation for y_plane part of planar format:
3091 */ 3286 */
3092 if (pstate->fb->pixel_format == DRM_FORMAT_NV12) { 3287 y_data_rate = cstate->wm.skl.plane_y_data_rate[id];
3093 y_data_rate = skl_plane_relative_data_rate(cstate, 3288
3094 pstate, 3289 y_plane_blocks = y_minimum[id];
3095 1); 3290 y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
3096 y_plane_blocks = y_minimum[id]; 3291 total_data_rate);
3097 y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
3098 total_data_rate);
3099 3292
3293 if (y_data_rate) {
3100 ddb->y_plane[pipe][id].start = start; 3294 ddb->y_plane[pipe][id].start = start;
3101 ddb->y_plane[pipe][id].end = start + y_plane_blocks; 3295 ddb->y_plane[pipe][id].end = start + y_plane_blocks;
3102
3103 start += y_plane_blocks;
3104 } 3296 }
3105 3297
3298 start += y_plane_blocks;
3106 } 3299 }
3107 3300
3301 return 0;
3108} 3302}
3109 3303
3110static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config) 3304static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
@@ -3161,35 +3355,41 @@ static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
3161 return ret; 3355 return ret;
3162} 3356}
3163 3357
3164static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb, 3358static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
3165 const struct intel_crtc *intel_crtc) 3359 struct intel_plane_state *pstate)
3166{ 3360{
3167 struct drm_device *dev = intel_crtc->base.dev; 3361 uint64_t adjusted_pixel_rate;
3168 struct drm_i915_private *dev_priv = dev->dev_private; 3362 uint64_t downscale_amount;
3169 const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb; 3363 uint64_t pixel_rate;
3364
3365 /* Shouldn't reach here on disabled planes... */
3366 if (WARN_ON(!pstate->visible))
3367 return 0;
3170 3368
3171 /* 3369 /*
3172 * If ddb allocation of pipes changed, it may require recalculation of 3370 * Adjusted plane pixel rate is just the pipe's adjusted pixel rate
3173 * watermarks 3371 * with additional adjustments for plane-specific scaling.
3174 */ 3372 */
3175 if (memcmp(new_ddb->pipe, cur_ddb->pipe, sizeof(new_ddb->pipe))) 3373 adjusted_pixel_rate = skl_pipe_pixel_rate(cstate);
3176 return true; 3374 downscale_amount = skl_plane_downscale_amount(pstate);
3375
3376 pixel_rate = adjusted_pixel_rate * downscale_amount >> 16;
3377 WARN_ON(pixel_rate != clamp_t(uint32_t, pixel_rate, 0, ~0));
3177 3378
3178 return false; 3379 return pixel_rate;
3179} 3380}
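skl_adjusted_plane_pixel_rate() above just scales the pipe's pixel rate by the plane's 16.16 downscale amount, with a WARN if the 64-bit product no longer fits in 32 bits. A compact sketch of that computation and overflow check (values invented):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t pipe_rate = 533250;   /* kHz, hypothetical */
        uint64_t downscale = 3u << 15; /* 1.5 in 16.16 */

        uint64_t rate = pipe_rate * downscale >> 16;

        /* the clamp check used above: result must fit in u32 */
        if (rate != (uint32_t)rate)
            printf("overflow!\n");
        else
            printf("plane pixel rate = %llu kHz\n", (unsigned long long)rate);
        /* 533250 * 1.5 = 799875 */
        return 0;
    }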
3180 3381
3181static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv, 3382static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
3182 struct intel_crtc_state *cstate, 3383 struct intel_crtc_state *cstate,
3183 struct intel_plane *intel_plane, 3384 struct intel_plane_state *intel_pstate,
3184 uint16_t ddb_allocation, 3385 uint16_t ddb_allocation,
3185 int level, 3386 int level,
3186 uint16_t *out_blocks, /* out */ 3387 uint16_t *out_blocks, /* out */
3187 uint8_t *out_lines /* out */) 3388 uint8_t *out_lines, /* out */
3389 bool *enabled /* out */)
3188{ 3390{
3189 struct drm_plane *plane = &intel_plane->base; 3391 struct drm_plane_state *pstate = &intel_pstate->base;
3190 struct drm_framebuffer *fb = plane->state->fb; 3392 struct drm_framebuffer *fb = pstate->fb;
3191 struct intel_plane_state *intel_pstate =
3192 to_intel_plane_state(plane->state);
3193 uint32_t latency = dev_priv->wm.skl_latency[level]; 3393 uint32_t latency = dev_priv->wm.skl_latency[level];
3194 uint32_t method1, method2; 3394 uint32_t method1, method2;
3195 uint32_t plane_bytes_per_line, plane_blocks_per_line; 3395 uint32_t plane_bytes_per_line, plane_blocks_per_line;
@@ -3197,20 +3397,24 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
3197 uint32_t selected_result; 3397 uint32_t selected_result;
3198 uint8_t cpp; 3398 uint8_t cpp;
3199 uint32_t width = 0, height = 0; 3399 uint32_t width = 0, height = 0;
3400 uint32_t plane_pixel_rate;
3200 3401
3201 if (latency == 0 || !cstate->base.active || !intel_pstate->visible) 3402 if (latency == 0 || !cstate->base.active || !intel_pstate->visible) {
3202 return false; 3403 *enabled = false;
3404 return 0;
3405 }
3203 3406
3204 width = drm_rect_width(&intel_pstate->src) >> 16; 3407 width = drm_rect_width(&intel_pstate->src) >> 16;
3205 height = drm_rect_height(&intel_pstate->src) >> 16; 3408 height = drm_rect_height(&intel_pstate->src) >> 16;
3206 3409
3207 if (intel_rotation_90_or_270(plane->state->rotation)) 3410 if (intel_rotation_90_or_270(pstate->rotation))
3208 swap(width, height); 3411 swap(width, height);
3209 3412
3210 cpp = drm_format_plane_cpp(fb->pixel_format, 0); 3413 cpp = drm_format_plane_cpp(fb->pixel_format, 0);
3211 method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate), 3414 plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
3212 cpp, latency); 3415
3213 method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate), 3416 method1 = skl_wm_method1(plane_pixel_rate, cpp, latency);
3417 method2 = skl_wm_method2(plane_pixel_rate,
3214 cstate->base.adjusted_mode.crtc_htotal, 3418 cstate->base.adjusted_mode.crtc_htotal,
3215 width, 3419 width,
3216 cpp, 3420 cpp,
@@ -3224,7 +3428,7 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
3224 fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) { 3428 fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
3225 uint32_t min_scanlines = 4; 3429 uint32_t min_scanlines = 4;
3226 uint32_t y_tile_minimum; 3430 uint32_t y_tile_minimum;
3227 if (intel_rotation_90_or_270(plane->state->rotation)) { 3431 if (intel_rotation_90_or_270(pstate->rotation)) {
3228 int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ? 3432 int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
3229 drm_format_plane_cpp(fb->pixel_format, 1) : 3433 drm_format_plane_cpp(fb->pixel_format, 1) :
3230 drm_format_plane_cpp(fb->pixel_format, 0); 3434 drm_format_plane_cpp(fb->pixel_format, 0);
@@ -3260,40 +3464,99 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
3260 res_blocks++; 3464 res_blocks++;
3261 } 3465 }
3262 3466
3263 if (res_blocks >= ddb_allocation || res_lines > 31) 3467 if (res_blocks >= ddb_allocation || res_lines > 31) {
3264 return false; 3468 *enabled = false;
3469
3470 /*
3471 * If there are no valid level 0 watermarks, then we can't
3472 * support this display configuration.
3473 */
3474 if (level) {
3475 return 0;
3476 } else {
3477 DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n");
3478 DRM_DEBUG_KMS("Plane %d.%d: blocks required = %u/%u, lines required = %u/31\n",
3479 to_intel_crtc(cstate->base.crtc)->pipe,
3480 skl_wm_plane_id(to_intel_plane(pstate->plane)),
3481 res_blocks, ddb_allocation, res_lines);
3482
3483 return -EINVAL;
3484 }
3485 }
3265 3486
3266 *out_blocks = res_blocks; 3487 *out_blocks = res_blocks;
3267 *out_lines = res_lines; 3488 *out_lines = res_lines;
3489 *enabled = true;
3268 3490
3269 return true; 3491 return 0;
3270} 3492}
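
The reworked helper separates "this level cannot be enabled" from "this configuration is invalid": running out of blocks or lines at level 0 is a hard -EINVAL, while the same shortfall at a higher level merely leaves that level disabled. A sketch of that contract, with invented names; the block accounting itself is elided:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Report via *enabled whether a watermark level fits in its allocation.
 * Only a level 0 failure is a fatal configuration error; higher levels
 * simply stay disabled, matching the "if (level)" branch above.
 */
static int check_level_fits(int level, unsigned int blocks_needed,
			    unsigned int blocks_avail, bool *enabled)
{
	if (blocks_needed >= blocks_avail) {
		*enabled = false;
		if (level == 0) {
			fprintf(stderr, "config exceeds watermark limits\n");
			return -EINVAL;
		}
		return 0;
	}

	*enabled = true;
	return 0;
}

int main(void)
{
	bool en;

	printf("level 3 -> ret %d\n", check_level_fits(3, 200, 100, &en));
	printf("level 0 -> ret %d\n", check_level_fits(0, 200, 100, &en));
	return 0;
}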
3271 3493
3272static void skl_compute_wm_level(const struct drm_i915_private *dev_priv, 3494static int
3273 struct skl_ddb_allocation *ddb, 3495skl_compute_wm_level(const struct drm_i915_private *dev_priv,
3274 struct intel_crtc_state *cstate, 3496 struct skl_ddb_allocation *ddb,
3275 int level, 3497 struct intel_crtc_state *cstate,
3276 struct skl_wm_level *result) 3498 int level,
3499 struct skl_wm_level *result)
3277{ 3500{
3278 struct drm_device *dev = dev_priv->dev; 3501 struct drm_device *dev = dev_priv->dev;
3502 struct drm_atomic_state *state = cstate->base.state;
3279 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); 3503 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
3504 struct drm_plane *plane;
3280 struct intel_plane *intel_plane; 3505 struct intel_plane *intel_plane;
3506 struct intel_plane_state *intel_pstate;
3281 uint16_t ddb_blocks; 3507 uint16_t ddb_blocks;
3282 enum pipe pipe = intel_crtc->pipe; 3508 enum pipe pipe = intel_crtc->pipe;
3509 int ret;
3283 3510
3284 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 3511 /*
3512 * We'll only calculate watermarks for planes that are actually
3513 * enabled, so make sure all other planes are set as disabled.
3514 */
3515 memset(result, 0, sizeof(*result));
3516
3517 for_each_intel_plane_mask(dev, intel_plane, cstate->base.plane_mask) {
3285 int i = skl_wm_plane_id(intel_plane); 3518 int i = skl_wm_plane_id(intel_plane);
3286 3519
3520 plane = &intel_plane->base;
3521 intel_pstate = NULL;
3522 if (state)
3523 intel_pstate =
3524 intel_atomic_get_existing_plane_state(state,
3525 intel_plane);
3526
3527 /*
3528 * Note: If we start supporting multiple pending atomic commits
3529 * against the same planes/CRTC's in the future, plane->state
3530 * will no longer be the correct pre-state to use for the
3531 * calculations here and we'll need to change where we get the
3532 * 'unchanged' plane data from.
3533 *
3534 * For now this is fine because we only allow one queued commit
3535 * against a CRTC. Even if the plane isn't modified by this
3536 * transaction and we don't have a plane lock, we still have
3537 * the CRTC's lock, so we know that no other transactions are
3538 * racing with us to update it.
3539 */
3540 if (!intel_pstate)
3541 intel_pstate = to_intel_plane_state(plane->state);
3542
3543 WARN_ON(!intel_pstate->base.fb);
3544
3287 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]); 3545 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
3288 3546
3289 result->plane_en[i] = skl_compute_plane_wm(dev_priv, 3547 ret = skl_compute_plane_wm(dev_priv,
3290 cstate, 3548 cstate,
3291 intel_plane, 3549 intel_pstate,
3292 ddb_blocks, 3550 ddb_blocks,
3293 level, 3551 level,
3294 &result->plane_res_b[i], 3552 &result->plane_res_b[i],
3295 &result->plane_res_l[i]); 3553 &result->plane_res_l[i],
3554 &result->plane_en[i]);
3555 if (ret)
3556 return ret;
3296 } 3557 }
3558
3559 return 0;
3297} 3560}
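
The plane-state lookup in skl_compute_wm_level() prefers the copy carried by the in-flight atomic transaction and only falls back to plane->state for planes the commit does not touch, which the comment above notes is safe while only one queued commit per CRTC is allowed. The shape of that fallback, with hypothetical types standing in for the drm structures:

#include <stddef.h>
#include <stdio.h>

struct plane_state { int value; };

struct plane {
	struct plane_state *state;	/* currently committed state */
	struct plane_state *in_flight;	/* state in this commit, or NULL */
};

/*
 * Prefer the plane state carried by the in-flight transaction; fall
 * back to the committed state for planes the commit does not touch.
 */
static struct plane_state *state_for_wm(const struct plane *p)
{
	return p->in_flight ? p->in_flight : p->state;
}

int main(void)
{
	struct plane_state committed = { 1 }, pending = { 2 };
	struct plane untouched = { &committed, NULL };
	struct plane updated = { &committed, &pending };

	printf("%d %d\n", state_for_wm(&untouched)->value,
	       state_for_wm(&updated)->value);	/* prints "1 2" */
	return 0;
}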
3298 3561
3299static uint32_t 3562static uint32_t
@@ -3327,21 +3590,26 @@ static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
3327 } 3590 }
3328} 3591}
3329 3592
3330static void skl_compute_pipe_wm(struct intel_crtc_state *cstate, 3593static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
3331 struct skl_ddb_allocation *ddb, 3594 struct skl_ddb_allocation *ddb,
3332 struct skl_pipe_wm *pipe_wm) 3595 struct skl_pipe_wm *pipe_wm)
3333{ 3596{
3334 struct drm_device *dev = cstate->base.crtc->dev; 3597 struct drm_device *dev = cstate->base.crtc->dev;
3335 const struct drm_i915_private *dev_priv = dev->dev_private; 3598 const struct drm_i915_private *dev_priv = dev->dev_private;
3336 int level, max_level = ilk_wm_max_level(dev); 3599 int level, max_level = ilk_wm_max_level(dev);
3600 int ret;
3337 3601
3338 for (level = 0; level <= max_level; level++) { 3602 for (level = 0; level <= max_level; level++) {
3339 skl_compute_wm_level(dev_priv, ddb, cstate, 3603 ret = skl_compute_wm_level(dev_priv, ddb, cstate,
3340 level, &pipe_wm->wm[level]); 3604 level, &pipe_wm->wm[level]);
3605 if (ret)
3606 return ret;
3341 } 3607 }
3342 pipe_wm->linetime = skl_compute_linetime_wm(cstate); 3608 pipe_wm->linetime = skl_compute_linetime_wm(cstate);
3343 3609
3344 skl_compute_transition_wm(cstate, &pipe_wm->trans_wm); 3610 skl_compute_transition_wm(cstate, &pipe_wm->trans_wm);
3611
3612 return 0;
3345} 3613}
3346 3614
3347static void skl_compute_wm_results(struct drm_device *dev, 3615static void skl_compute_wm_results(struct drm_device *dev,
@@ -3421,7 +3689,9 @@ static void skl_write_wm_values(struct drm_i915_private *dev_priv,
3421 int i, level, max_level = ilk_wm_max_level(dev); 3689 int i, level, max_level = ilk_wm_max_level(dev);
3422 enum pipe pipe = crtc->pipe; 3690 enum pipe pipe = crtc->pipe;
3423 3691
3424 if (!new->dirty[pipe]) 3692 if ((new->dirty_pipes & drm_crtc_mask(&crtc->base)) == 0)
3693 continue;
3694 if (!crtc->active)
3425 continue; 3695 continue;
3426 3696
3427 I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]); 3697 I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);
@@ -3588,87 +3858,144 @@ static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
3588 } 3858 }
3589} 3859}
3590 3860
3591static bool skl_update_pipe_wm(struct drm_crtc *crtc, 3861static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
3592 struct skl_ddb_allocation *ddb, /* out */ 3862 struct skl_ddb_allocation *ddb, /* out */
3593 struct skl_pipe_wm *pipe_wm /* out */) 3863 struct skl_pipe_wm *pipe_wm, /* out */
3864 bool *changed /* out */)
3594{ 3865{
3595 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3866 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->crtc);
3596 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); 3867 struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
3868 int ret;
3597 3869
3598 skl_allocate_pipe_ddb(cstate, ddb); 3870 ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm);
3599 skl_compute_pipe_wm(cstate, ddb, pipe_wm); 3871 if (ret)
3872 return ret;
3600 3873
3601 if (!memcmp(&intel_crtc->wm.active.skl, pipe_wm, sizeof(*pipe_wm))) 3874 if (!memcmp(&intel_crtc->wm.active.skl, pipe_wm, sizeof(*pipe_wm)))
3602 return false; 3875 *changed = false;
3603 3876 else
3604 intel_crtc->wm.active.skl = *pipe_wm; 3877 *changed = true;
3605 3878
3606 return true; 3879 return 0;
3607} 3880}
3608 3881
3609static void skl_update_other_pipe_wm(struct drm_device *dev, 3882static int
3610 struct drm_crtc *crtc, 3883skl_compute_ddb(struct drm_atomic_state *state)
3611 struct skl_wm_values *r)
3612{ 3884{
3885 struct drm_device *dev = state->dev;
3886 struct drm_i915_private *dev_priv = to_i915(dev);
3887 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
3613 struct intel_crtc *intel_crtc; 3888 struct intel_crtc *intel_crtc;
3614 struct intel_crtc *this_crtc = to_intel_crtc(crtc); 3889 struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
3890 unsigned realloc_pipes = dev_priv->active_crtcs;
3891 int ret;
3615 3892
3616 /* 3893 /*
3617 * If the WM update hasn't changed the allocation for this_crtc (the 3894 * If this is our first atomic update following hardware readout,
3618 * crtc we are currently computing the new WM values for), other 3895 * we can't trust the DDB that the BIOS programmed for us. Let's
3619 * enabled crtcs will keep the same allocation and we don't need to 3896 * pretend that all pipes switched active status so that we'll
3620 * recompute anything for them. 3897 * ensure a full DDB recompute.
3621 */ 3898 */
3622 if (!skl_ddb_allocation_changed(&r->ddb, this_crtc)) 3899 if (dev_priv->wm.distrust_bios_wm)
3623 return; 3900 intel_state->active_pipe_changes = ~0;
3624 3901
3625 /* 3902 /*
3626 * Otherwise, because of this_crtc being freshly enabled/disabled, the 3903 * If the modeset changes which CRTC's are active, we need to
3627 * other active pipes need new DDB allocation and WM values. 3904 * recompute the DDB allocation for *all* active pipes, even
3905 * those that weren't otherwise being modified in any way by this
3906 * atomic commit. Due to the shrinking of the per-pipe allocations
3907 * when new active CRTC's are added, it's possible for a pipe that
3908 * we were already using and aren't changing at all here to suddenly
3909 * become invalid if its DDB needs exceed its new allocation.
3910 *
3911 * Note that if we wind up doing a full DDB recompute, we can't let
3912 * any other display updates race with this transaction, so we need
3913 * to grab the lock on *all* CRTC's.
3628 */ 3914 */
3629 for_each_intel_crtc(dev, intel_crtc) { 3915 if (intel_state->active_pipe_changes) {
3630 struct skl_pipe_wm pipe_wm = {}; 3916 realloc_pipes = ~0;
3631 bool wm_changed; 3917 intel_state->wm_results.dirty_pipes = ~0;
3632 3918 }
3633 if (this_crtc->pipe == intel_crtc->pipe)
3634 continue;
3635
3636 if (!intel_crtc->active)
3637 continue;
3638 3919
3639 wm_changed = skl_update_pipe_wm(&intel_crtc->base, 3920 for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
3640 &r->ddb, &pipe_wm); 3921 struct intel_crtc_state *cstate;
3641 3922
3642 /* 3923 cstate = intel_atomic_get_crtc_state(state, intel_crtc);
3643 * If we end up re-computing the other pipe WM values, it's 3924 if (IS_ERR(cstate))
3644 * because it was really needed, so we expect the WM values to 3925 return PTR_ERR(cstate);
3645 * be different.
3646 */
3647 WARN_ON(!wm_changed);
3648 3926
3649 skl_compute_wm_results(dev, &pipe_wm, r, intel_crtc); 3927 ret = skl_allocate_pipe_ddb(cstate, ddb);
3650 r->dirty[intel_crtc->pipe] = true; 3928 if (ret)
3929 return ret;
3651 } 3930 }
3931
3932 return 0;
3652} 3933}
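
skl_compute_ddb() builds a pipe mask to decide whose DDB share must be redistributed: normally only the currently active pipes, but every pipe (~0) when the set of active pipes changed or when the BIOS-programmed DDB cannot be trusted after hardware readout. A sketch of just that mask decision:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Decide which pipes need their DDB share recomputed: normally just the
 * active ones, but every pipe when the active set changed or when the
 * BIOS-programmed DDB cannot be trusted after hardware readout.
 */
static uint32_t pipes_to_realloc(uint32_t active_pipes,
				 bool active_set_changed, bool distrust_bios)
{
	if (distrust_bios || active_set_changed)
		return ~0u;	/* full recompute: lock every CRTC */
	return active_pipes;
}

int main(void)
{
	printf("%#x\n", pipes_to_realloc(0x3, false, false));	/* 0x3 */
	printf("%#x\n", pipes_to_realloc(0x3, true, false));	/* all */
	return 0;
}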
3653 3934
3654static void skl_clear_wm(struct skl_wm_values *watermarks, enum pipe pipe) 3935static int
3936skl_compute_wm(struct drm_atomic_state *state)
3655{ 3937{
3656 watermarks->wm_linetime[pipe] = 0; 3938 struct drm_crtc *crtc;
3657 memset(watermarks->plane[pipe], 0, 3939 struct drm_crtc_state *cstate;
3658 sizeof(uint32_t) * 8 * I915_MAX_PLANES); 3940 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
3659 memset(watermarks->plane_trans[pipe], 3941 struct skl_wm_values *results = &intel_state->wm_results;
3660 0, sizeof(uint32_t) * I915_MAX_PLANES); 3942 struct skl_pipe_wm *pipe_wm;
3661 watermarks->plane_trans[pipe][PLANE_CURSOR] = 0; 3943 bool changed = false;
3944 int ret, i;
3662 3945
3663 /* Clear ddb entries for pipe */ 3946 /*
3664 memset(&watermarks->ddb.pipe[pipe], 0, sizeof(struct skl_ddb_entry)); 3947 * If this transaction isn't actually touching any CRTC's, don't
3665 memset(&watermarks->ddb.plane[pipe], 0, 3948 * bother with watermark calculation. Note that if we pass this
3666 sizeof(struct skl_ddb_entry) * I915_MAX_PLANES); 3949 * test, we're guaranteed to hold at least one CRTC state mutex,
3667 memset(&watermarks->ddb.y_plane[pipe], 0, 3950 * which means we can safely use values like dev_priv->active_crtcs
3668 sizeof(struct skl_ddb_entry) * I915_MAX_PLANES); 3951 * since any racing commits that want to update them would need to
3669 memset(&watermarks->ddb.plane[pipe][PLANE_CURSOR], 0, 3952 * hold _all_ CRTC state mutexes.
3670 sizeof(struct skl_ddb_entry)); 3953 */
3954 for_each_crtc_in_state(state, crtc, cstate, i)
3955 changed = true;
3956 if (!changed)
3957 return 0;
3958
3959 /* Clear all dirty flags */
3960 results->dirty_pipes = 0;
3961
3962 ret = skl_compute_ddb(state);
3963 if (ret)
3964 return ret;
3965
3966 /*
3967 * Calculate WM's for all pipes that are part of this transaction.
3968 * Note that the DDB allocation above may have added CRTC's to this
3969 * transaction that weren't otherwise being modified (and set bits in dirty_pipes) if
3970 * pipe allocations had to change.
3971 *
3972 * FIXME: Now that we're doing this in the atomic check phase, we
3973 * should allow skl_update_pipe_wm() to return failure in cases where
3974 * no suitable watermark values can be found.
3975 */
3976 for_each_crtc_in_state(state, crtc, cstate, i) {
3977 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3978 struct intel_crtc_state *intel_cstate =
3979 to_intel_crtc_state(cstate);
3980
3981 pipe_wm = &intel_cstate->wm.skl.optimal;
3982 ret = skl_update_pipe_wm(cstate, &results->ddb, pipe_wm,
3983 &changed);
3984 if (ret)
3985 return ret;
3671 3986
3987 if (changed)
3988 results->dirty_pipes |= drm_crtc_mask(crtc);
3989
3990 if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
3991 /* This pipe's WM's did not change */
3992 continue;
3993
3994 intel_cstate->update_wm_pre = true;
3995 skl_compute_wm_results(crtc->dev, pipe_wm, results, intel_crtc);
3996 }
3997
3998 return 0;
3672} 3999}
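
Dirty tracking moves from a per-pipe bool array to a single bitmask keyed by CRTC index, which is what the drm_crtc_mask() tests above and in skl_write_wm_values() rely on. A sketch of the bookkeeping; crtc_mask() here is a stand-in for the drm helper:

#include <stdint.h>
#include <stdio.h>

/* One bit per CRTC, in the spirit of drm_crtc_mask(). */
static uint32_t crtc_mask(unsigned int index)
{
	return 1u << index;
}

int main(void)
{
	uint32_t dirty_pipes = 0;
	unsigned int i;

	dirty_pipes |= crtc_mask(1);	/* watermarks changed on pipe B */

	for (i = 0; i < 3; i++) {
		if ((dirty_pipes & crtc_mask(i)) == 0)
			continue;	/* this pipe's WM's did not change */
		printf("reprogram pipe %u\n", i);
	}
	return 0;
}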
3673 4000
3674static void skl_update_wm(struct drm_crtc *crtc) 4001static void skl_update_wm(struct drm_crtc *crtc)
@@ -3678,26 +4005,22 @@ static void skl_update_wm(struct drm_crtc *crtc)
3678 struct drm_i915_private *dev_priv = dev->dev_private; 4005 struct drm_i915_private *dev_priv = dev->dev_private;
3679 struct skl_wm_values *results = &dev_priv->wm.skl_results; 4006 struct skl_wm_values *results = &dev_priv->wm.skl_results;
3680 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); 4007 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
3681 struct skl_pipe_wm *pipe_wm = &cstate->wm.optimal.skl; 4008 struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
3682
3683
3684 /* Clear all dirty flags */
3685 memset(results->dirty, 0, sizeof(bool) * I915_MAX_PIPES);
3686
3687 skl_clear_wm(results, intel_crtc->pipe);
3688 4009
3689 if (!skl_update_pipe_wm(crtc, &results->ddb, pipe_wm)) 4010 if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
3690 return; 4011 return;
3691 4012
3692 skl_compute_wm_results(dev, pipe_wm, results, intel_crtc); 4013 intel_crtc->wm.active.skl = *pipe_wm;
3693 results->dirty[intel_crtc->pipe] = true; 4014
4015 mutex_lock(&dev_priv->wm.wm_mutex);
3694 4016
3695 skl_update_other_pipe_wm(dev, crtc, results);
3696 skl_write_wm_values(dev_priv, results); 4017 skl_write_wm_values(dev_priv, results);
3697 skl_flush_wm_values(dev_priv, results); 4018 skl_flush_wm_values(dev_priv, results);
3698 4019
3699 /* store the new configuration */ 4020 /* store the new configuration */
3700 dev_priv->wm.skl_hw = *results; 4021 dev_priv->wm.skl_hw = *results;
4022
4023 mutex_unlock(&dev_priv->wm.wm_mutex);
3701} 4024}
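
After this change skl_update_wm() computes nothing itself; it only publishes results that skl_compute_wm() produced during the atomic check phase, and it takes wm_mutex so the register writes and the cached skl_hw copy stay consistent. A sketch of that check/commit split, with a pthread mutex standing in for wm_mutex and the register writes elided:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct wm_results { unsigned int dirty_pipes; unsigned int regs[4]; };

static pthread_mutex_t wm_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct wm_results hw;	/* last values handed to hardware */

/*
 * Commit phase: publish results computed earlier during atomic check.
 * The register writes and the cached copy sit under one lock so readers
 * never observe a half-updated snapshot.
 */
static void commit_watermarks(const struct wm_results *computed)
{
	pthread_mutex_lock(&wm_mutex);
	/* ...write computed->regs to the hardware here... */
	memcpy(&hw, computed, sizeof(hw));
	pthread_mutex_unlock(&wm_mutex);
}

int main(void)
{
	struct wm_results r = { .dirty_pipes = 0x1, .regs = { 42 } };

	commit_watermarks(&r);
	printf("cached reg0 = %u\n", hw.regs[0]);
	return 0;
}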
3702 4025
3703static void ilk_compute_wm_config(struct drm_device *dev, 4026static void ilk_compute_wm_config(struct drm_device *dev,
@@ -3757,7 +4080,7 @@ static void ilk_initial_watermarks(struct intel_crtc_state *cstate)
3757 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); 4080 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
3758 4081
3759 mutex_lock(&dev_priv->wm.wm_mutex); 4082 mutex_lock(&dev_priv->wm.wm_mutex);
3760 intel_crtc->wm.active.ilk = cstate->wm.intermediate; 4083 intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate;
3761 ilk_program_watermarks(dev_priv); 4084 ilk_program_watermarks(dev_priv);
3762 mutex_unlock(&dev_priv->wm.wm_mutex); 4085 mutex_unlock(&dev_priv->wm.wm_mutex);
3763} 4086}
@@ -3769,7 +4092,7 @@ static void ilk_optimize_watermarks(struct intel_crtc_state *cstate)
3769 4092
3770 mutex_lock(&dev_priv->wm.wm_mutex); 4093 mutex_lock(&dev_priv->wm.wm_mutex);
3771 if (cstate->wm.need_postvbl_update) { 4094 if (cstate->wm.need_postvbl_update) {
3772 intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk; 4095 intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal;
3773 ilk_program_watermarks(dev_priv); 4096 ilk_program_watermarks(dev_priv);
3774 } 4097 }
3775 mutex_unlock(&dev_priv->wm.wm_mutex); 4098 mutex_unlock(&dev_priv->wm.wm_mutex);
@@ -3826,7 +4149,7 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3826 struct skl_wm_values *hw = &dev_priv->wm.skl_hw; 4149 struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
3827 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4150 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3828 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); 4151 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
3829 struct skl_pipe_wm *active = &cstate->wm.optimal.skl; 4152 struct skl_pipe_wm *active = &cstate->wm.skl.optimal;
3830 enum pipe pipe = intel_crtc->pipe; 4153 enum pipe pipe = intel_crtc->pipe;
3831 int level, i, max_level; 4154 int level, i, max_level;
3832 uint32_t temp; 4155 uint32_t temp;
@@ -3849,7 +4172,7 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3849 if (!intel_crtc->active) 4172 if (!intel_crtc->active)
3850 return; 4173 return;
3851 4174
3852 hw->dirty[pipe] = true; 4175 hw->dirty_pipes |= drm_crtc_mask(crtc);
3853 4176
3854 active->linetime = hw->wm_linetime[pipe]; 4177 active->linetime = hw->wm_linetime[pipe];
3855 4178
@@ -3883,6 +4206,14 @@ void skl_wm_get_hw_state(struct drm_device *dev)
3883 skl_ddb_get_hw_state(dev_priv, ddb); 4206 skl_ddb_get_hw_state(dev_priv, ddb);
3884 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) 4207 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
3885 skl_pipe_wm_get_hw_state(crtc); 4208 skl_pipe_wm_get_hw_state(crtc);
4209
4210 if (dev_priv->active_crtcs) {
4211 /* Fully recompute DDB on first atomic commit */
4212 dev_priv->wm.distrust_bios_wm = true;
4213 } else {
4214 /* Easy/common case; just sanitize DDB now if everything is off */
4215 memset(ddb, 0, sizeof(*ddb));
4216 }
3886} 4217}
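
The readout path above distinguishes two cases: if any CRTC is running, the BIOS-programmed DDB is kept but flagged untrusted so the first atomic commit recomputes it in full; if everything is off, the DDB can simply be zeroed on the spot. A sketch of that decision, with illustrative types:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct ddb_allocation { unsigned int plane[3][4]; };

/*
 * After hardware readout: if any CRTC is running, keep the BIOS DDB but
 * flag it untrusted so the first commit recomputes everything; if all
 * CRTCs are off, the DDB can simply be zeroed now.
 */
static bool sanitize_ddb(struct ddb_allocation *ddb, unsigned int active_crtcs)
{
	if (active_crtcs)
		return true;	/* caller sets its distrust flag */

	memset(ddb, 0, sizeof(*ddb));
	return false;
}

int main(void)
{
	struct ddb_allocation ddb = { { { 7 } } };

	printf("distrust = %d, plane[0][0] = %u\n",
	       sanitize_ddb(&ddb, 0), ddb.plane[0][0]);
	return 0;
}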
3887 4218
3888static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) 4219static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
@@ -3892,7 +4223,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3892 struct ilk_wm_values *hw = &dev_priv->wm.hw; 4223 struct ilk_wm_values *hw = &dev_priv->wm.hw;
3893 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4224 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3894 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); 4225 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
3895 struct intel_pipe_wm *active = &cstate->wm.optimal.ilk; 4226 struct intel_pipe_wm *active = &cstate->wm.ilk.optimal;
3896 enum pipe pipe = intel_crtc->pipe; 4227 enum pipe pipe = intel_crtc->pipe;
3897 static const i915_reg_t wm0_pipe_reg[] = { 4228 static const i915_reg_t wm0_pipe_reg[] = {
3898 [PIPE_A] = WM0_PIPEA_ILK, 4229 [PIPE_A] = WM0_PIPEA_ILK,
@@ -4169,9 +4500,8 @@ DEFINE_SPINLOCK(mchdev_lock);
4169 * mchdev_lock. */ 4500 * mchdev_lock. */
4170static struct drm_i915_private *i915_mch_dev; 4501static struct drm_i915_private *i915_mch_dev;
4171 4502
4172bool ironlake_set_drps(struct drm_device *dev, u8 val) 4503bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
4173{ 4504{
4174 struct drm_i915_private *dev_priv = dev->dev_private;
4175 u16 rgvswctl; 4505 u16 rgvswctl;
4176 4506
4177 assert_spin_locked(&mchdev_lock); 4507 assert_spin_locked(&mchdev_lock);
@@ -4193,9 +4523,8 @@ bool ironlake_set_drps(struct drm_device *dev, u8 val)
4193 return true; 4523 return true;
4194} 4524}
4195 4525
4196static void ironlake_enable_drps(struct drm_device *dev) 4526static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
4197{ 4527{
4198 struct drm_i915_private *dev_priv = dev->dev_private;
4199 u32 rgvmodectl; 4528 u32 rgvmodectl;
4200 u8 fmax, fmin, fstart, vstart; 4529 u8 fmax, fmin, fstart, vstart;
4201 4530
@@ -4252,7 +4581,7 @@ static void ironlake_enable_drps(struct drm_device *dev)
4252 DRM_ERROR("stuck trying to change perf mode\n"); 4581 DRM_ERROR("stuck trying to change perf mode\n");
4253 mdelay(1); 4582 mdelay(1);
4254 4583
4255 ironlake_set_drps(dev, fstart); 4584 ironlake_set_drps(dev_priv, fstart);
4256 4585
4257 dev_priv->ips.last_count1 = I915_READ(DMIEC) + 4586 dev_priv->ips.last_count1 = I915_READ(DMIEC) +
4258 I915_READ(DDREC) + I915_READ(CSIEC); 4587 I915_READ(DDREC) + I915_READ(CSIEC);
@@ -4263,9 +4592,8 @@ static void ironlake_enable_drps(struct drm_device *dev)
4263 spin_unlock_irq(&mchdev_lock); 4592 spin_unlock_irq(&mchdev_lock);
4264} 4593}
4265 4594
4266static void ironlake_disable_drps(struct drm_device *dev) 4595static void ironlake_disable_drps(struct drm_i915_private *dev_priv)
4267{ 4596{
4268 struct drm_i915_private *dev_priv = dev->dev_private;
4269 u16 rgvswctl; 4597 u16 rgvswctl;
4270 4598
4271 spin_lock_irq(&mchdev_lock); 4599 spin_lock_irq(&mchdev_lock);
@@ -4280,7 +4608,7 @@ static void ironlake_disable_drps(struct drm_device *dev)
4280 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT); 4608 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
4281 4609
4282 /* Go back to the starting frequency */ 4610 /* Go back to the starting frequency */
4283 ironlake_set_drps(dev, dev_priv->ips.fstart); 4611 ironlake_set_drps(dev_priv, dev_priv->ips.fstart);
4284 mdelay(1); 4612 mdelay(1);
4285 rgvswctl |= MEMCTL_CMD_STS; 4613 rgvswctl |= MEMCTL_CMD_STS;
4286 I915_WRITE(MEMSWCTL, rgvswctl); 4614 I915_WRITE(MEMSWCTL, rgvswctl);
@@ -4424,12 +4752,10 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
4424/* gen6_set_rps is called to update the frequency request, but should also be 4752/* gen6_set_rps is called to update the frequency request, but should also be
4425 * called when the range (min_delay and max_delay) is modified so that we can 4753 * called when the range (min_delay and max_delay) is modified so that we can
4426 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */ 4754 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
4427static void gen6_set_rps(struct drm_device *dev, u8 val) 4755static void gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
4428{ 4756{
4429 struct drm_i915_private *dev_priv = dev->dev_private;
4430
4431 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */ 4757 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
4432 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) 4758 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
4433 return; 4759 return;
4434 4760
4435 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 4761 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -4442,10 +4768,10 @@ static void gen6_set_rps(struct drm_device *dev, u8 val)
4442 if (val != dev_priv->rps.cur_freq) { 4768 if (val != dev_priv->rps.cur_freq) {
4443 gen6_set_rps_thresholds(dev_priv, val); 4769 gen6_set_rps_thresholds(dev_priv, val);
4444 4770
4445 if (IS_GEN9(dev)) 4771 if (IS_GEN9(dev_priv))
4446 I915_WRITE(GEN6_RPNSWREQ, 4772 I915_WRITE(GEN6_RPNSWREQ,
4447 GEN9_FREQUENCY(val)); 4773 GEN9_FREQUENCY(val));
4448 else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 4774 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
4449 I915_WRITE(GEN6_RPNSWREQ, 4775 I915_WRITE(GEN6_RPNSWREQ,
4450 HSW_FREQUENCY(val)); 4776 HSW_FREQUENCY(val));
4451 else 4777 else
@@ -4467,15 +4793,13 @@ static void gen6_set_rps(struct drm_device *dev, u8 val)
4467 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val)); 4793 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
4468} 4794}
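
gen6_set_rps() follows a common request pattern: validate the value against the soft limits, skip the hardware write when the request already matches the current frequency, and only then pick the platform-specific request encoding. A sketch of the guard half of that pattern; the actual encodings (GEN9_FREQUENCY(), HSW_FREQUENCY()) are deliberately left out:

#include <assert.h>
#include <stdio.h>

static unsigned int cur_freq;

/*
 * Guard pattern from the request path above: validate against the soft
 * limits and skip the hardware write when nothing changed.
 */
static void set_rps(unsigned int val, unsigned int min, unsigned int max)
{
	assert(val >= min && val <= max);	/* the driver WARNs instead */

	if (val == cur_freq)
		return;

	/* ...program thresholds and the frequency request here... */
	cur_freq = val;
}

int main(void)
{
	set_rps(6, 3, 12);
	set_rps(6, 3, 12);	/* no-op: frequency already requested */
	printf("cur_freq = %u\n", cur_freq);
	return 0;
}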
4469 4795
4470static void valleyview_set_rps(struct drm_device *dev, u8 val) 4796static void valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
4471{ 4797{
4472 struct drm_i915_private *dev_priv = dev->dev_private;
4473
4474 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 4798 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4475 WARN_ON(val > dev_priv->rps.max_freq); 4799 WARN_ON(val > dev_priv->rps.max_freq);
4476 WARN_ON(val < dev_priv->rps.min_freq); 4800 WARN_ON(val < dev_priv->rps.min_freq);
4477 4801
4478 if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1), 4802 if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1),
4479 "Odd GPU freq value\n")) 4803 "Odd GPU freq value\n"))
4480 val &= ~1; 4804 val &= ~1;
4481 4805
@@ -4508,7 +4832,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
4508 /* Wake up the media well, as that takes a lot less 4832 /* Wake up the media well, as that takes a lot less
4509 * power than the Render well. */ 4833 * power than the Render well. */
4510 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA); 4834 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
4511 valleyview_set_rps(dev_priv->dev, val); 4835 valleyview_set_rps(dev_priv, val);
4512 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA); 4836 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
4513} 4837}
4514 4838
@@ -4526,14 +4850,12 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
4526 4850
4527void gen6_rps_idle(struct drm_i915_private *dev_priv) 4851void gen6_rps_idle(struct drm_i915_private *dev_priv)
4528{ 4852{
4529 struct drm_device *dev = dev_priv->dev;
4530
4531 mutex_lock(&dev_priv->rps.hw_lock); 4853 mutex_lock(&dev_priv->rps.hw_lock);
4532 if (dev_priv->rps.enabled) { 4854 if (dev_priv->rps.enabled) {
4533 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) 4855 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4534 vlv_set_rps_idle(dev_priv); 4856 vlv_set_rps_idle(dev_priv);
4535 else 4857 else
4536 gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); 4858 gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
4537 dev_priv->rps.last_adj = 0; 4859 dev_priv->rps.last_adj = 0;
4538 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); 4860 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
4539 } 4861 }
@@ -4581,49 +4903,39 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
4581 spin_unlock(&dev_priv->rps.client_lock); 4903 spin_unlock(&dev_priv->rps.client_lock);
4582} 4904}
4583 4905
4584void intel_set_rps(struct drm_device *dev, u8 val) 4906void intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
4585{ 4907{
4586 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) 4908 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4587 valleyview_set_rps(dev, val); 4909 valleyview_set_rps(dev_priv, val);
4588 else 4910 else
4589 gen6_set_rps(dev, val); 4911 gen6_set_rps(dev_priv, val);
4590} 4912}
4591 4913
4592static void gen9_disable_rc6(struct drm_device *dev) 4914static void gen9_disable_rc6(struct drm_i915_private *dev_priv)
4593{ 4915{
4594 struct drm_i915_private *dev_priv = dev->dev_private;
4595
4596 I915_WRITE(GEN6_RC_CONTROL, 0); 4916 I915_WRITE(GEN6_RC_CONTROL, 0);
4597 I915_WRITE(GEN9_PG_ENABLE, 0); 4917 I915_WRITE(GEN9_PG_ENABLE, 0);
4598} 4918}
4599 4919
4600static void gen9_disable_rps(struct drm_device *dev) 4920static void gen9_disable_rps(struct drm_i915_private *dev_priv)
4601{ 4921{
4602 struct drm_i915_private *dev_priv = dev->dev_private;
4603
4604 I915_WRITE(GEN6_RP_CONTROL, 0); 4922 I915_WRITE(GEN6_RP_CONTROL, 0);
4605} 4923}
4606 4924
4607static void gen6_disable_rps(struct drm_device *dev) 4925static void gen6_disable_rps(struct drm_i915_private *dev_priv)
4608{ 4926{
4609 struct drm_i915_private *dev_priv = dev->dev_private;
4610
4611 I915_WRITE(GEN6_RC_CONTROL, 0); 4927 I915_WRITE(GEN6_RC_CONTROL, 0);
4612 I915_WRITE(GEN6_RPNSWREQ, 1 << 31); 4928 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
4613 I915_WRITE(GEN6_RP_CONTROL, 0); 4929 I915_WRITE(GEN6_RP_CONTROL, 0);
4614} 4930}
4615 4931
4616static void cherryview_disable_rps(struct drm_device *dev) 4932static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
4617{ 4933{
4618 struct drm_i915_private *dev_priv = dev->dev_private;
4619
4620 I915_WRITE(GEN6_RC_CONTROL, 0); 4934 I915_WRITE(GEN6_RC_CONTROL, 0);
4621} 4935}
4622 4936
4623static void valleyview_disable_rps(struct drm_device *dev) 4937static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
4624{ 4938{
4625 struct drm_i915_private *dev_priv = dev->dev_private;
4626
4627 /* we're doing forcewake before Disabling RC6, 4939 /* we're doing forcewake before Disabling RC6,
4628 * This is what the BIOS expects when going into suspend */ 4940 * This is what the BIOS expects when going into suspend */
4629 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 4941 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -4633,15 +4945,15 @@ static void valleyview_disable_rps(struct drm_device *dev)
4633 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 4945 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4634} 4946}
4635 4947
4636static void intel_print_rc6_info(struct drm_device *dev, u32 mode) 4948static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode)
4637{ 4949{
4638 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { 4950 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
4639 if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1))) 4951 if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
4640 mode = GEN6_RC_CTL_RC6_ENABLE; 4952 mode = GEN6_RC_CTL_RC6_ENABLE;
4641 else 4953 else
4642 mode = 0; 4954 mode = 0;
4643 } 4955 }
4644 if (HAS_RC6p(dev)) 4956 if (HAS_RC6p(dev_priv))
4645 DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n", 4957 DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
4646 onoff(mode & GEN6_RC_CTL_RC6_ENABLE), 4958 onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
4647 onoff(mode & GEN6_RC_CTL_RC6p_ENABLE), 4959 onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
@@ -4652,9 +4964,8 @@ static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
4652 onoff(mode & GEN6_RC_CTL_RC6_ENABLE)); 4964 onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
4653} 4965}
4654 4966
4655static bool bxt_check_bios_rc6_setup(const struct drm_device *dev) 4967static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
4656{ 4968{
4657 struct drm_i915_private *dev_priv = to_i915(dev);
4658 struct i915_ggtt *ggtt = &dev_priv->ggtt; 4969 struct i915_ggtt *ggtt = &dev_priv->ggtt;
4659 bool enable_rc6 = true; 4970 bool enable_rc6 = true;
4660 unsigned long rc6_ctx_base; 4971 unsigned long rc6_ctx_base;
@@ -4695,16 +5006,16 @@ static bool bxt_check_bios_rc6_setup(const struct drm_device *dev)
4695 return enable_rc6; 5006 return enable_rc6;
4696} 5007}
4697 5008
4698int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6) 5009int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
4699{ 5010{
4700 /* No RC6 before Ironlake and code is gone for ilk. */ 5011 /* No RC6 before Ironlake and code is gone for ilk. */
4701 if (INTEL_INFO(dev)->gen < 6) 5012 if (INTEL_INFO(dev_priv)->gen < 6)
4702 return 0; 5013 return 0;
4703 5014
4704 if (!enable_rc6) 5015 if (!enable_rc6)
4705 return 0; 5016 return 0;
4706 5017
4707 if (IS_BROXTON(dev) && !bxt_check_bios_rc6_setup(dev)) { 5018 if (IS_BROXTON(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) {
4708 DRM_INFO("RC6 disabled by BIOS\n"); 5019 DRM_INFO("RC6 disabled by BIOS\n");
4709 return 0; 5020 return 0;
4710 } 5021 }
@@ -4713,7 +5024,7 @@ int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
4713 if (enable_rc6 >= 0) { 5024 if (enable_rc6 >= 0) {
4714 int mask; 5025 int mask;
4715 5026
4716 if (HAS_RC6p(dev)) 5027 if (HAS_RC6p(dev_priv))
4717 mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE | 5028 mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
4718 INTEL_RC6pp_ENABLE; 5029 INTEL_RC6pp_ENABLE;
4719 else 5030 else
@@ -4726,20 +5037,14 @@ int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
4726 return enable_rc6 & mask; 5037 return enable_rc6 & mask;
4727 } 5038 }
4728 5039
4729 if (IS_IVYBRIDGE(dev)) 5040 if (IS_IVYBRIDGE(dev_priv))
4730 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); 5041 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
4731 5042
4732 return INTEL_RC6_ENABLE; 5043 return INTEL_RC6_ENABLE;
4733} 5044}
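
sanitize_rc6_option() clips the requested RC6 mode against what the platform supports: the deeper RC6p/RC6pp states only appear in the mask when HAS_RC6p() holds, and Ivybridge defaults to RC6|RC6p. A sketch of the masking; the bit values below are invented for illustration and are not the driver's INTEL_RC6*_ENABLE constants:

#include <stdbool.h>
#include <stdio.h>

#define RC6_ENABLE	(1 << 0)	/* illustrative bit values only, */
#define RC6p_ENABLE	(1 << 1)	/* not the driver's constants    */
#define RC6pp_ENABLE	(1 << 2)

/*
 * Clip a requested RC6 mode against what the platform supports: the
 * deeper RC6p/RC6pp states are only allowed where HAS_RC6p() holds.
 */
static int sanitize_rc6(int requested, bool has_rc6p)
{
	int mask = has_rc6p ? (RC6_ENABLE | RC6p_ENABLE | RC6pp_ENABLE)
			    : RC6_ENABLE;

	if ((requested & mask) != requested)
		fprintf(stderr, "adjusting RC6 mask %d -> %d\n",
			requested, requested & mask);

	return requested & mask;
}

int main(void)
{
	printf("%d\n", sanitize_rc6(7, false));	/* clipped to RC6 only */
	return 0;
}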
4734 5045
4735int intel_enable_rc6(const struct drm_device *dev) 5046static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
4736{ 5047{
4737 return i915.enable_rc6;
4738}
4739
4740static void gen6_init_rps_frequencies(struct drm_device *dev)
4741{
4742 struct drm_i915_private *dev_priv = dev->dev_private;
4743 uint32_t rp_state_cap; 5048 uint32_t rp_state_cap;
4744 u32 ddcc_status = 0; 5049 u32 ddcc_status = 0;
4745 int ret; 5050 int ret;
@@ -4747,7 +5052,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
4747 /* All of these values are in units of 50MHz */ 5052 /* All of these values are in units of 50MHz */
4748 dev_priv->rps.cur_freq = 0; 5053 dev_priv->rps.cur_freq = 0;
4749 /* static values from HW: RP0 > RP1 > RPn (min_freq) */ 5054 /* static values from HW: RP0 > RP1 > RPn (min_freq) */
4750 if (IS_BROXTON(dev)) { 5055 if (IS_BROXTON(dev_priv)) {
4751 rp_state_cap = I915_READ(BXT_RP_STATE_CAP); 5056 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
4752 dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff; 5057 dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
4753 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff; 5058 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
@@ -4763,8 +5068,8 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
4763 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq; 5068 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
4764 5069
4765 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq; 5070 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
4766 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || 5071 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
4767 IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { 5072 IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
4768 ret = sandybridge_pcode_read(dev_priv, 5073 ret = sandybridge_pcode_read(dev_priv,
4769 HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL, 5074 HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
4770 &ddcc_status); 5075 &ddcc_status);
@@ -4776,7 +5081,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
4776 dev_priv->rps.max_freq); 5081 dev_priv->rps.max_freq);
4777 } 5082 }
4778 5083
4779 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { 5084 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
4780 /* Store the frequency values in 16.66 MHz units, which is 5085 /* Store the frequency values in 16.66 MHz units, which is
4781 the natural hardware unit for SKL */ 5086 the natural hardware unit for SKL */
4782 dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER; 5087 dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
@@ -4793,7 +5098,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
4793 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; 5098 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
4794 5099
4795 if (dev_priv->rps.min_freq_softlimit == 0) { 5100 if (dev_priv->rps.min_freq_softlimit == 0) {
4796 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 5101 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
4797 dev_priv->rps.min_freq_softlimit = 5102 dev_priv->rps.min_freq_softlimit =
4798 max_t(int, dev_priv->rps.efficient_freq, 5103 max_t(int, dev_priv->rps.efficient_freq,
4799 intel_freq_opcode(dev_priv, 450)); 5104 intel_freq_opcode(dev_priv, 450));
@@ -4804,16 +5109,14 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
4804} 5109}
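
The RP0/RP1/RPn fields are read out of RP_STATE_CAP one byte each in 50 MHz units, then rescaled by GEN9_FREQ_SCALER into 16.66 MHz units on SKL/KBL. A sketch of the unpacking and rescaling; the standard (non-Broxton) field ordering is assumed, and the scaler value of 3 is simply the 50/16.66 ratio:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FREQ_SCALER 3	/* 50 MHz / 16.66 MHz, cf. GEN9_FREQ_SCALER */

struct rps_freqs { unsigned int rp0, rp1, rpn; };

/*
 * Unpack the three one-byte frequency fields (standard, non-Broxton
 * ordering assumed) and optionally rescale from 50 MHz units into the
 * 16.66 MHz units used on SKL/KBL.
 */
static struct rps_freqs decode_rp_state_cap(uint32_t cap, bool gen9_units)
{
	struct rps_freqs f = {
		.rp0 = (cap >> 0) & 0xff,
		.rp1 = (cap >> 8) & 0xff,
		.rpn = (cap >> 16) & 0xff,
	};

	if (gen9_units) {
		f.rp0 *= FREQ_SCALER;
		f.rp1 *= FREQ_SCALER;
		f.rpn *= FREQ_SCALER;
	}
	return f;
}

int main(void)
{
	struct rps_freqs f = decode_rp_state_cap(0x0b0c16, true);

	printf("rp0=%u rp1=%u rpn=%u (16.66 MHz units)\n", f.rp0, f.rp1, f.rpn);
	return 0;
}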
4805 5110
4806/* See the Gen9_GT_PM_Programming_Guide doc for the below */ 5111/* See the Gen9_GT_PM_Programming_Guide doc for the below */
4807static void gen9_enable_rps(struct drm_device *dev) 5112static void gen9_enable_rps(struct drm_i915_private *dev_priv)
4808{ 5113{
4809 struct drm_i915_private *dev_priv = dev->dev_private;
4810
4811 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 5114 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4812 5115
4813 gen6_init_rps_frequencies(dev); 5116 gen6_init_rps_frequencies(dev_priv);
4814 5117
4815 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */ 5118 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
4816 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { 5119 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
4817 /* 5120 /*
4818 * BIOS could leave the Hw Turbo enabled, so need to explicitly 5121 * BIOS could leave the Hw Turbo enabled, so need to explicitly
4819 * clear out the Control register just to avoid inconsistency 5122 * clear out the Control register just to avoid inconsistency
@@ -4823,7 +5126,7 @@ static void gen9_enable_rps(struct drm_device *dev)
4823 * if the Turbo is left enabled in the Control register, as the 5126 * if the Turbo is left enabled in the Control register, as the
4824 * Up/Down interrupts would remain masked. 5127 * Up/Down interrupts would remain masked.
4825 */ 5128 */
4826 gen9_disable_rps(dev); 5129 gen9_disable_rps(dev_priv);
4827 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5130 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4828 return; 5131 return;
4829 } 5132 }
@@ -4842,14 +5145,13 @@ static void gen9_enable_rps(struct drm_device *dev)
4842 * Up/Down EI & threshold registers, as well as the RP_CONTROL, 5145 * Up/Down EI & threshold registers, as well as the RP_CONTROL,
4843 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */ 5146 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
4844 dev_priv->rps.power = HIGH_POWER; /* force a reset */ 5147 dev_priv->rps.power = HIGH_POWER; /* force a reset */
4845 gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); 5148 gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
4846 5149
4847 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5150 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4848} 5151}
4849 5152
4850static void gen9_enable_rc6(struct drm_device *dev) 5153static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
4851{ 5154{
4852 struct drm_i915_private *dev_priv = dev->dev_private;
4853 struct intel_engine_cs *engine; 5155 struct intel_engine_cs *engine;
4854 uint32_t rc6_mask = 0; 5156 uint32_t rc6_mask = 0;
4855 5157
@@ -4866,7 +5168,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
4866 /* 2b: Program RC6 thresholds.*/ 5168 /* 2b: Program RC6 thresholds.*/
4867 5169
4868 /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */ 5170 /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
4869 if (IS_SKYLAKE(dev)) 5171 if (IS_SKYLAKE(dev_priv))
4870 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16); 5172 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
4871 else 5173 else
4872 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16); 5174 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
@@ -4875,7 +5177,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
4875 for_each_engine(engine, dev_priv) 5177 for_each_engine(engine, dev_priv)
4876 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); 5178 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
4877 5179
4878 if (HAS_GUC_UCODE(dev)) 5180 if (HAS_GUC(dev_priv))
4879 I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA); 5181 I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
4880 5182
4881 I915_WRITE(GEN6_RC_SLEEP, 0); 5183 I915_WRITE(GEN6_RC_SLEEP, 0);
@@ -4885,12 +5187,12 @@ static void gen9_enable_rc6(struct drm_device *dev)
4885 I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25); 5187 I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
4886 5188
4887 /* 3a: Enable RC6 */ 5189 /* 3a: Enable RC6 */
4888 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) 5190 if (intel_enable_rc6() & INTEL_RC6_ENABLE)
4889 rc6_mask = GEN6_RC_CTL_RC6_ENABLE; 5191 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
4890 DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE)); 5192 DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
4891 /* WaRsUseTimeoutMode */ 5193 /* WaRsUseTimeoutMode */
4892 if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) || 5194 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) ||
4893 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { 5195 IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
4894 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */ 5196 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */
4895 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | 5197 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
4896 GEN7_RC_CTL_TO_MODE | 5198 GEN7_RC_CTL_TO_MODE |
@@ -4906,19 +5208,17 @@ static void gen9_enable_rc6(struct drm_device *dev)
4906 * 3b: Enable Coarse Power Gating only when RC6 is enabled. 5208 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
4907 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6. 5209 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
4908 */ 5210 */
4909 if (NEEDS_WaRsDisableCoarsePowerGating(dev)) 5211 if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
4910 I915_WRITE(GEN9_PG_ENABLE, 0); 5212 I915_WRITE(GEN9_PG_ENABLE, 0);
4911 else 5213 else
4912 I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? 5214 I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
4913 (GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0); 5215 (GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);
4914 5216
4915 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5217 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4916
4917} 5218}
4918 5219
4919static void gen8_enable_rps(struct drm_device *dev) 5220static void gen8_enable_rps(struct drm_i915_private *dev_priv)
4920{ 5221{
4921 struct drm_i915_private *dev_priv = dev->dev_private;
4922 struct intel_engine_cs *engine; 5222 struct intel_engine_cs *engine;
4923 uint32_t rc6_mask = 0; 5223 uint32_t rc6_mask = 0;
4924 5224
@@ -4933,7 +5233,7 @@ static void gen8_enable_rps(struct drm_device *dev)
4933 I915_WRITE(GEN6_RC_CONTROL, 0); 5233 I915_WRITE(GEN6_RC_CONTROL, 0);
4934 5234
4935 /* Initialize rps frequencies */ 5235 /* Initialize rps frequencies */
4936 gen6_init_rps_frequencies(dev); 5236 gen6_init_rps_frequencies(dev_priv);
4937 5237
4938 /* 2b: Program RC6 thresholds.*/ 5238 /* 2b: Program RC6 thresholds.*/
4939 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); 5239 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
@@ -4942,16 +5242,16 @@ static void gen8_enable_rps(struct drm_device *dev)
4942 for_each_engine(engine, dev_priv) 5242 for_each_engine(engine, dev_priv)
4943 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); 5243 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
4944 I915_WRITE(GEN6_RC_SLEEP, 0); 5244 I915_WRITE(GEN6_RC_SLEEP, 0);
4945 if (IS_BROADWELL(dev)) 5245 if (IS_BROADWELL(dev_priv))
4946 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */ 5246 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
4947 else 5247 else
4948 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */ 5248 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
4949 5249
4950 /* 3: Enable RC6 */ 5250 /* 3: Enable RC6 */
4951 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) 5251 if (intel_enable_rc6() & INTEL_RC6_ENABLE)
4952 rc6_mask = GEN6_RC_CTL_RC6_ENABLE; 5252 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
4953 intel_print_rc6_info(dev, rc6_mask); 5253 intel_print_rc6_info(dev_priv, rc6_mask);
4954 if (IS_BROADWELL(dev)) 5254 if (IS_BROADWELL(dev_priv))
4955 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | 5255 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
4956 GEN7_RC_CTL_TO_MODE | 5256 GEN7_RC_CTL_TO_MODE |
4957 rc6_mask); 5257 rc6_mask);
@@ -4992,14 +5292,13 @@ static void gen8_enable_rps(struct drm_device *dev)
4992 /* 6: Ring frequency + overclocking (our driver does this later) */ 5292 /* 6: Ring frequency + overclocking (our driver does this later) */
4993 5293
4994 dev_priv->rps.power = HIGH_POWER; /* force a reset */ 5294 dev_priv->rps.power = HIGH_POWER; /* force a reset */
4995 gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); 5295 gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
4996 5296
4997 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5297 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4998} 5298}
4999 5299
5000static void gen6_enable_rps(struct drm_device *dev) 5300static void gen6_enable_rps(struct drm_i915_private *dev_priv)
5001{ 5301{
5002 struct drm_i915_private *dev_priv = dev->dev_private;
5003 struct intel_engine_cs *engine; 5302 struct intel_engine_cs *engine;
5004 u32 rc6vids, pcu_mbox = 0, rc6_mask = 0; 5303 u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
5005 u32 gtfifodbg; 5304 u32 gtfifodbg;
@@ -5026,7 +5325,7 @@ static void gen6_enable_rps(struct drm_device *dev)
5026 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 5325 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5027 5326
5028 /* Initialize rps frequencies */ 5327 /* Initialize rps frequencies */
5029 gen6_init_rps_frequencies(dev); 5328 gen6_init_rps_frequencies(dev_priv);
5030 5329
5031 /* disable the counters and set deterministic thresholds */ 5330 /* disable the counters and set deterministic thresholds */
5032 I915_WRITE(GEN6_RC_CONTROL, 0); 5331 I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -5042,7 +5341,7 @@ static void gen6_enable_rps(struct drm_device *dev)
5042 5341
5043 I915_WRITE(GEN6_RC_SLEEP, 0); 5342 I915_WRITE(GEN6_RC_SLEEP, 0);
5044 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); 5343 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
5045 if (IS_IVYBRIDGE(dev)) 5344 if (IS_IVYBRIDGE(dev_priv))
5046 I915_WRITE(GEN6_RC6_THRESHOLD, 125000); 5345 I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
5047 else 5346 else
5048 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); 5347 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
@@ -5050,12 +5349,12 @@ static void gen6_enable_rps(struct drm_device *dev)
5050 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ 5349 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
5051 5350
5052 /* Check if we are enabling RC6 */ 5351 /* Check if we are enabling RC6 */
5053 rc6_mode = intel_enable_rc6(dev_priv->dev); 5352 rc6_mode = intel_enable_rc6();
5054 if (rc6_mode & INTEL_RC6_ENABLE) 5353 if (rc6_mode & INTEL_RC6_ENABLE)
5055 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE; 5354 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
5056 5355
5057 /* We don't use those on Haswell */ 5356 /* We don't use those on Haswell */
5058 if (!IS_HASWELL(dev)) { 5357 if (!IS_HASWELL(dev_priv)) {
5059 if (rc6_mode & INTEL_RC6p_ENABLE) 5358 if (rc6_mode & INTEL_RC6p_ENABLE)
5060 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE; 5359 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
5061 5360
@@ -5063,7 +5362,7 @@ static void gen6_enable_rps(struct drm_device *dev)
5063 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE; 5362 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
5064 } 5363 }
5065 5364
5066 intel_print_rc6_info(dev, rc6_mask); 5365 intel_print_rc6_info(dev_priv, rc6_mask);
5067 5366
5068 I915_WRITE(GEN6_RC_CONTROL, 5367 I915_WRITE(GEN6_RC_CONTROL,
5069 rc6_mask | 5368 rc6_mask |
@@ -5087,13 +5386,13 @@ static void gen6_enable_rps(struct drm_device *dev)
5087 } 5386 }
5088 5387
5089 dev_priv->rps.power = HIGH_POWER; /* force a reset */ 5388 dev_priv->rps.power = HIGH_POWER; /* force a reset */
5090 gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); 5389 gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
5091 5390
5092 rc6vids = 0; 5391 rc6vids = 0;
5093 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); 5392 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
5094 if (IS_GEN6(dev) && ret) { 5393 if (IS_GEN6(dev_priv) && ret) {
5095 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n"); 5394 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
5096 } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) { 5395 } else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
5097 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n", 5396 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
5098 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450); 5397 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
5099 rc6vids &= 0xffff00; 5398 rc6vids &= 0xffff00;
@@ -5106,9 +5405,8 @@ static void gen6_enable_rps(struct drm_device *dev)
5106 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5405 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5107} 5406}
5108 5407
5109static void __gen6_update_ring_freq(struct drm_device *dev) 5408static void __gen6_update_ring_freq(struct drm_i915_private *dev_priv)
5110{ 5409{
5111 struct drm_i915_private *dev_priv = dev->dev_private;
5112 int min_freq = 15; 5410 int min_freq = 15;
5113 unsigned int gpu_freq; 5411 unsigned int gpu_freq;
5114 unsigned int max_ia_freq, min_ring_freq; 5412 unsigned int max_ia_freq, min_ring_freq;
@@ -5137,7 +5435,7 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
5137 /* convert DDR frequency from units of 266.6MHz to bandwidth */ 5435 /* convert DDR frequency from units of 266.6MHz to bandwidth */
5138 min_ring_freq = mult_frac(min_ring_freq, 8, 3); 5436 min_ring_freq = mult_frac(min_ring_freq, 8, 3);
5139 5437
5140 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { 5438 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
5141 /* Convert GT frequency to 50 MHz units */ 5439 /* Convert GT frequency to 50 MHz units */
5142 min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER; 5440 min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
5143 max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER; 5441 max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
@@ -5155,16 +5453,16 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
5155 int diff = max_gpu_freq - gpu_freq; 5453 int diff = max_gpu_freq - gpu_freq;
5156 unsigned int ia_freq = 0, ring_freq = 0; 5454 unsigned int ia_freq = 0, ring_freq = 0;
5157 5455
5158 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { 5456 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
5159 /* 5457 /*
5160 * ring_freq = 2 * GT. ring_freq is in 100MHz units 5458 * ring_freq = 2 * GT. ring_freq is in 100MHz units
5161 * No floor required for ring frequency on SKL. 5459 * No floor required for ring frequency on SKL.
5162 */ 5460 */
5163 ring_freq = gpu_freq; 5461 ring_freq = gpu_freq;
5164 } else if (INTEL_INFO(dev)->gen >= 8) { 5462 } else if (INTEL_INFO(dev_priv)->gen >= 8) {
5165 /* max(2 * GT, DDR). NB: GT is 50MHz units */ 5463 /* max(2 * GT, DDR). NB: GT is 50MHz units */
5166 ring_freq = max(min_ring_freq, gpu_freq); 5464 ring_freq = max(min_ring_freq, gpu_freq);
5167 } else if (IS_HASWELL(dev)) { 5465 } else if (IS_HASWELL(dev_priv)) {
5168 ring_freq = mult_frac(gpu_freq, 5, 4); 5466 ring_freq = mult_frac(gpu_freq, 5, 4);
5169 ring_freq = max(min_ring_freq, ring_freq); 5467 ring_freq = max(min_ring_freq, ring_freq);
5170 /* leave ia_freq as the default, chosen by cpufreq */ 5468 /* leave ia_freq as the default, chosen by cpufreq */
@@ -5191,26 +5489,23 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
5191 } 5489 }
5192} 5490}
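
The table-building loop above walks GPU frequencies from max down to min and derives a ring frequency per platform: equal to the GT frequency on SKL/KBL, max(min_ring_freq, gpu_freq) on gen8+, and 1.25x the GT frequency floored at the DDR minimum on Haswell. A sketch of just that rule; the matching IA frequency and the pcode write are omitted:

#include <stdio.h>

enum gpu_platform { PLAT_SKL, PLAT_GEN8, PLAT_HSW };

static unsigned int max_u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

/*
 * Per-platform ring frequency rule from the table-building loop above.
 */
static unsigned int ring_freq_for(enum gpu_platform p, unsigned int gpu_freq,
				  unsigned int min_ring_freq)
{
	switch (p) {
	case PLAT_SKL:	/* ring = 2 * GT in 100 MHz units, no floor */
		return gpu_freq;
	case PLAT_GEN8:	/* max(2 * GT, DDR), GT in 50 MHz units */
		return max_u(min_ring_freq, gpu_freq);
	case PLAT_HSW:	/* ring = 1.25 * GT, floored at the DDR minimum */
	default:
		return max_u(min_ring_freq, gpu_freq * 5 / 4);
	}
}

int main(void)
{
	printf("HSW: %u\n", ring_freq_for(PLAT_HSW, 12, 14));	/* floored */
	return 0;
}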
5193 5491
5194void gen6_update_ring_freq(struct drm_device *dev) 5492void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
5195{ 5493{
5196 struct drm_i915_private *dev_priv = dev->dev_private; 5494 if (!HAS_CORE_RING_FREQ(dev_priv))
5197
5198 if (!HAS_CORE_RING_FREQ(dev))
5199 return; 5495 return;
5200 5496
5201 mutex_lock(&dev_priv->rps.hw_lock); 5497 mutex_lock(&dev_priv->rps.hw_lock);
5202 __gen6_update_ring_freq(dev); 5498 __gen6_update_ring_freq(dev_priv);
5203 mutex_unlock(&dev_priv->rps.hw_lock); 5499 mutex_unlock(&dev_priv->rps.hw_lock);
5204} 5500}
5205 5501
5206static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv) 5502static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
5207{ 5503{
5208 struct drm_device *dev = dev_priv->dev;
5209 u32 val, rp0; 5504 u32 val, rp0;
5210 5505
5211 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE); 5506 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
5212 5507
5213 switch (INTEL_INFO(dev)->eu_total) { 5508 switch (INTEL_INFO(dev_priv)->eu_total) {
5214 case 8: 5509 case 8:
5215 /* (2 * 4) config */ 5510 /* (2 * 4) config */
5216 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT); 5511 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
@@ -5321,9 +5616,8 @@ static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
5321 WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0); 5616 WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
5322} 5617}
5323 5618
5324static void cherryview_setup_pctx(struct drm_device *dev) 5619static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
5325{ 5620{
5326 struct drm_i915_private *dev_priv = to_i915(dev);
5327 struct i915_ggtt *ggtt = &dev_priv->ggtt; 5621 struct i915_ggtt *ggtt = &dev_priv->ggtt;
5328 unsigned long pctx_paddr, paddr; 5622 unsigned long pctx_paddr, paddr;
5329 u32 pcbr; 5623 u32 pcbr;
@@ -5342,15 +5636,14 @@ static void cherryview_setup_pctx(struct drm_device *dev)
5342 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR)); 5636 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
5343} 5637}
5344 5638
5345static void valleyview_setup_pctx(struct drm_device *dev) 5639static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
5346{ 5640{
5347 struct drm_i915_private *dev_priv = dev->dev_private;
5348 struct drm_i915_gem_object *pctx; 5641 struct drm_i915_gem_object *pctx;
5349 unsigned long pctx_paddr; 5642 unsigned long pctx_paddr;
5350 u32 pcbr; 5643 u32 pcbr;
5351 int pctx_size = 24*1024; 5644 int pctx_size = 24*1024;
5352 5645
5353 mutex_lock(&dev->struct_mutex); 5646 mutex_lock(&dev_priv->dev->struct_mutex);
5354 5647
5355 pcbr = I915_READ(VLV_PCBR); 5648 pcbr = I915_READ(VLV_PCBR);
5356 if (pcbr) { 5649 if (pcbr) {
@@ -5375,7 +5668,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
5375 * overlap with other ranges, such as the frame buffer, protected 5668 * overlap with other ranges, such as the frame buffer, protected
5376 * memory, or any other relevant ranges. 5669 * memory, or any other relevant ranges.
5377 */ 5670 */
5378 pctx = i915_gem_object_create_stolen(dev, pctx_size); 5671 pctx = i915_gem_object_create_stolen(dev_priv->dev, pctx_size);
5379 if (!pctx) { 5672 if (!pctx) {
5380 DRM_DEBUG("not enough stolen space for PCTX, disabling\n"); 5673 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
5381 goto out; 5674 goto out;
@@ -5387,13 +5680,11 @@ static void valleyview_setup_pctx(struct drm_device *dev)
5387out: 5680out:
5388 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR)); 5681 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
5389 dev_priv->vlv_pctx = pctx; 5682 dev_priv->vlv_pctx = pctx;
5390 mutex_unlock(&dev->struct_mutex); 5683 mutex_unlock(&dev_priv->dev->struct_mutex);
5391} 5684}
5392 5685
5393static void valleyview_cleanup_pctx(struct drm_device *dev) 5686static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
5394{ 5687{
5395 struct drm_i915_private *dev_priv = dev->dev_private;
5396
5397 if (WARN_ON(!dev_priv->vlv_pctx)) 5688 if (WARN_ON(!dev_priv->vlv_pctx))
5398 return; 5689 return;
5399 5690
@@ -5412,12 +5703,11 @@ static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
5412 dev_priv->rps.gpll_ref_freq); 5703 dev_priv->rps.gpll_ref_freq);
5413} 5704}
5414 5705
5415static void valleyview_init_gt_powersave(struct drm_device *dev) 5706static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
5416{ 5707{
5417 struct drm_i915_private *dev_priv = dev->dev_private;
5418 u32 val; 5708 u32 val;
5419 5709
5420 valleyview_setup_pctx(dev); 5710 valleyview_setup_pctx(dev_priv);
5421 5711
5422 vlv_init_gpll_ref_freq(dev_priv); 5712 vlv_init_gpll_ref_freq(dev_priv);
5423 5713
@@ -5471,12 +5761,11 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
5471 mutex_unlock(&dev_priv->rps.hw_lock); 5761 mutex_unlock(&dev_priv->rps.hw_lock);
5472} 5762}
5473 5763
5474static void cherryview_init_gt_powersave(struct drm_device *dev) 5764static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
5475{ 5765{
5476 struct drm_i915_private *dev_priv = dev->dev_private;
5477 u32 val; 5766 u32 val;
5478 5767
5479 cherryview_setup_pctx(dev); 5768 cherryview_setup_pctx(dev_priv);
5480 5769
5481 vlv_init_gpll_ref_freq(dev_priv); 5770 vlv_init_gpll_ref_freq(dev_priv);
5482 5771
@@ -5536,14 +5825,13 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
5536 mutex_unlock(&dev_priv->rps.hw_lock); 5825 mutex_unlock(&dev_priv->rps.hw_lock);
5537} 5826}
5538 5827
5539static void valleyview_cleanup_gt_powersave(struct drm_device *dev) 5828static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
5540{ 5829{
5541 valleyview_cleanup_pctx(dev); 5830 valleyview_cleanup_pctx(dev_priv);
5542} 5831}
5543 5832
5544static void cherryview_enable_rps(struct drm_device *dev) 5833static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
5545{ 5834{
5546 struct drm_i915_private *dev_priv = dev->dev_private;
5547 struct intel_engine_cs *engine; 5835 struct intel_engine_cs *engine;
5548 u32 gtfifodbg, val, rc6_mode = 0, pcbr; 5836 u32 gtfifodbg, val, rc6_mode = 0, pcbr;
5549 5837
@@ -5588,8 +5876,8 @@ static void cherryview_enable_rps(struct drm_device *dev)
5588 pcbr = I915_READ(VLV_PCBR); 5876 pcbr = I915_READ(VLV_PCBR);
5589 5877
5590 /* 3: Enable RC6 */ 5878 /* 3: Enable RC6 */
5591 if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) && 5879 if ((intel_enable_rc6() & INTEL_RC6_ENABLE) &&
5592 (pcbr >> VLV_PCBR_ADDR_SHIFT)) 5880 (pcbr >> VLV_PCBR_ADDR_SHIFT))
5593 rc6_mode = GEN7_RC_CTL_TO_MODE; 5881 rc6_mode = GEN7_RC_CTL_TO_MODE;
5594 5882
5595 I915_WRITE(GEN6_RC_CONTROL, rc6_mode); 5883 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
@@ -5634,14 +5922,13 @@ static void cherryview_enable_rps(struct drm_device *dev)
5634 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq), 5922 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
5635 dev_priv->rps.idle_freq); 5923 dev_priv->rps.idle_freq);
5636 5924
5637 valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); 5925 valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq);
5638 5926
5639 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5927 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5640} 5928}
5641 5929
5642static void valleyview_enable_rps(struct drm_device *dev) 5930static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
5643{ 5931{
5644 struct drm_i915_private *dev_priv = dev->dev_private;
5645 struct intel_engine_cs *engine; 5932 struct intel_engine_cs *engine;
5646 u32 gtfifodbg, val, rc6_mode = 0; 5933 u32 gtfifodbg, val, rc6_mode = 0;
5647 5934
@@ -5694,10 +5981,10 @@ static void valleyview_enable_rps(struct drm_device *dev)
5694 VLV_MEDIA_RC6_COUNT_EN | 5981 VLV_MEDIA_RC6_COUNT_EN |
5695 VLV_RENDER_RC6_COUNT_EN)); 5982 VLV_RENDER_RC6_COUNT_EN));
5696 5983
5697 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) 5984 if (intel_enable_rc6() & INTEL_RC6_ENABLE)
5698 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL; 5985 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
5699 5986
5700 intel_print_rc6_info(dev, rc6_mode); 5987 intel_print_rc6_info(dev_priv, rc6_mode);
5701 5988
5702 I915_WRITE(GEN6_RC_CONTROL, rc6_mode); 5989 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
5703 5990
@@ -5724,7 +6011,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
5724 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq), 6011 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
5725 dev_priv->rps.idle_freq); 6012 dev_priv->rps.idle_freq);
5726 6013
5727 valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); 6014 valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq);
5728 6015
5729 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 6016 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5730} 6017}
@@ -5814,10 +6101,9 @@ static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
5814 6101
5815unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) 6102unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
5816{ 6103{
5817 struct drm_device *dev = dev_priv->dev;
5818 unsigned long val; 6104 unsigned long val;
5819 6105
5820 if (INTEL_INFO(dev)->gen != 5) 6106 if (INTEL_INFO(dev_priv)->gen != 5)
5821 return 0; 6107 return 0;
5822 6108
5823 spin_lock_irq(&mchdev_lock); 6109 spin_lock_irq(&mchdev_lock);
@@ -5857,11 +6143,10 @@ static int _pxvid_to_vd(u8 pxvid)
5857 6143
5858static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) 6144static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
5859{ 6145{
5860 struct drm_device *dev = dev_priv->dev;
5861 const int vd = _pxvid_to_vd(pxvid); 6146 const int vd = _pxvid_to_vd(pxvid);
5862 const int vm = vd - 1125; 6147 const int vm = vd - 1125;
5863 6148
5864 if (INTEL_INFO(dev)->is_mobile) 6149 if (INTEL_INFO(dev_priv)->is_mobile)
5865 return vm > 0 ? vm : 0; 6150 return vm > 0 ? vm : 0;
5866 6151
5867 return vd; 6152 return vd;
@@ -5902,9 +6187,7 @@ static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
5902 6187
5903void i915_update_gfx_val(struct drm_i915_private *dev_priv) 6188void i915_update_gfx_val(struct drm_i915_private *dev_priv)
5904{ 6189{
5905 struct drm_device *dev = dev_priv->dev; 6190 if (INTEL_INFO(dev_priv)->gen != 5)
5906
5907 if (INTEL_INFO(dev)->gen != 5)
5908 return; 6191 return;
5909 6192
5910 spin_lock_irq(&mchdev_lock); 6193 spin_lock_irq(&mchdev_lock);
@@ -5953,10 +6236,9 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
5953 6236
5954unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) 6237unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
5955{ 6238{
5956 struct drm_device *dev = dev_priv->dev;
5957 unsigned long val; 6239 unsigned long val;
5958 6240
5959 if (INTEL_INFO(dev)->gen != 5) 6241 if (INTEL_INFO(dev_priv)->gen != 5)
5960 return 0; 6242 return 0;
5961 6243
5962 spin_lock_irq(&mchdev_lock); 6244 spin_lock_irq(&mchdev_lock);
@@ -6097,7 +6379,7 @@ bool i915_gpu_turbo_disable(void)
6097 6379
6098 dev_priv->ips.max_delay = dev_priv->ips.fstart; 6380 dev_priv->ips.max_delay = dev_priv->ips.fstart;
6099 6381
6100 if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart)) 6382 if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart))
6101 ret = false; 6383 ret = false;
6102 6384
6103out_unlock: 6385out_unlock:
@@ -6145,9 +6427,8 @@ void intel_gpu_ips_teardown(void)
6145 spin_unlock_irq(&mchdev_lock); 6427 spin_unlock_irq(&mchdev_lock);
6146} 6428}
6147 6429
6148static void intel_init_emon(struct drm_device *dev) 6430static void intel_init_emon(struct drm_i915_private *dev_priv)
6149{ 6431{
6150 struct drm_i915_private *dev_priv = dev->dev_private;
6151 u32 lcfuse; 6432 u32 lcfuse;
6152 u8 pxw[16]; 6433 u8 pxw[16];
6153 int i; 6434 int i;
@@ -6216,10 +6497,8 @@ static void intel_init_emon(struct drm_device *dev)
6216 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK); 6497 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
6217} 6498}
6218 6499
6219void intel_init_gt_powersave(struct drm_device *dev) 6500void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
6220{ 6501{
6221 struct drm_i915_private *dev_priv = dev->dev_private;
6222
6223 /* 6502 /*
6224 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a 6503

6225 * requirement. 6504 * requirement.
@@ -6229,74 +6508,66 @@ void intel_init_gt_powersave(struct drm_device *dev)
6229 intel_runtime_pm_get(dev_priv); 6508 intel_runtime_pm_get(dev_priv);
6230 } 6509 }
6231 6510
6232 if (IS_CHERRYVIEW(dev)) 6511 if (IS_CHERRYVIEW(dev_priv))
6233 cherryview_init_gt_powersave(dev); 6512 cherryview_init_gt_powersave(dev_priv);
6234 else if (IS_VALLEYVIEW(dev)) 6513 else if (IS_VALLEYVIEW(dev_priv))
6235 valleyview_init_gt_powersave(dev); 6514 valleyview_init_gt_powersave(dev_priv);
6236} 6515}
6237 6516
6238void intel_cleanup_gt_powersave(struct drm_device *dev) 6517void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
6239{ 6518{
6240 struct drm_i915_private *dev_priv = dev->dev_private; 6519 if (IS_CHERRYVIEW(dev_priv))
6241
6242 if (IS_CHERRYVIEW(dev))
6243 return; 6520 return;
6244 else if (IS_VALLEYVIEW(dev)) 6521 else if (IS_VALLEYVIEW(dev_priv))
6245 valleyview_cleanup_gt_powersave(dev); 6522 valleyview_cleanup_gt_powersave(dev_priv);
6246 6523
6247 if (!i915.enable_rc6) 6524 if (!i915.enable_rc6)
6248 intel_runtime_pm_put(dev_priv); 6525 intel_runtime_pm_put(dev_priv);
6249} 6526}
6250 6527
6251static void gen6_suspend_rps(struct drm_device *dev) 6528static void gen6_suspend_rps(struct drm_i915_private *dev_priv)
6252{ 6529{
6253 struct drm_i915_private *dev_priv = dev->dev_private;
6254
6255 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 6530 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
6256 6531
6257 gen6_disable_rps_interrupts(dev); 6532 gen6_disable_rps_interrupts(dev_priv);
6258} 6533}
6259 6534
6260/** 6535/**
6261 * intel_suspend_gt_powersave - suspend PM work and helper threads 6536 * intel_suspend_gt_powersave - suspend PM work and helper threads
6262 * @dev: drm device 6537 * @dev_priv: i915 device
6263 * 6538 *
6264 * We don't want to disable RC6 or other features here, we just want 6539 * We don't want to disable RC6 or other features here, we just want
6265 * to make sure any work we've queued has finished and won't bother 6540 * to make sure any work we've queued has finished and won't bother
6266 * us while we're suspended. 6541 * us while we're suspended.
6267 */ 6542 */
6268void intel_suspend_gt_powersave(struct drm_device *dev) 6543void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
6269{ 6544{
6270 struct drm_i915_private *dev_priv = dev->dev_private; 6545 if (INTEL_GEN(dev_priv) < 6)
6271
6272 if (INTEL_INFO(dev)->gen < 6)
6273 return; 6546 return;
6274 6547
6275 gen6_suspend_rps(dev); 6548 gen6_suspend_rps(dev_priv);
6276 6549
6277 /* Force GPU to min freq during suspend */ 6550 /* Force GPU to min freq during suspend */
6278 gen6_rps_idle(dev_priv); 6551 gen6_rps_idle(dev_priv);
6279} 6552}
6280 6553
6281void intel_disable_gt_powersave(struct drm_device *dev) 6554void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
6282{ 6555{
6283 struct drm_i915_private *dev_priv = dev->dev_private; 6556 if (IS_IRONLAKE_M(dev_priv)) {
6284 6557 ironlake_disable_drps(dev_priv);
6285 if (IS_IRONLAKE_M(dev)) { 6558 } else if (INTEL_INFO(dev_priv)->gen >= 6) {
6286 ironlake_disable_drps(dev); 6559 intel_suspend_gt_powersave(dev_priv);
6287 } else if (INTEL_INFO(dev)->gen >= 6) {
6288 intel_suspend_gt_powersave(dev);
6289 6560
6290 mutex_lock(&dev_priv->rps.hw_lock); 6561 mutex_lock(&dev_priv->rps.hw_lock);
6291 if (INTEL_INFO(dev)->gen >= 9) { 6562 if (INTEL_INFO(dev_priv)->gen >= 9) {
6292 gen9_disable_rc6(dev); 6563 gen9_disable_rc6(dev_priv);
6293 gen9_disable_rps(dev); 6564 gen9_disable_rps(dev_priv);
6294 } else if (IS_CHERRYVIEW(dev)) 6565 } else if (IS_CHERRYVIEW(dev_priv))
6295 cherryview_disable_rps(dev); 6566 cherryview_disable_rps(dev_priv);
6296 else if (IS_VALLEYVIEW(dev)) 6567 else if (IS_VALLEYVIEW(dev_priv))
6297 valleyview_disable_rps(dev); 6568 valleyview_disable_rps(dev_priv);
6298 else 6569 else
6299 gen6_disable_rps(dev); 6570 gen6_disable_rps(dev_priv);
6300 6571
6301 dev_priv->rps.enabled = false; 6572 dev_priv->rps.enabled = false;
6302 mutex_unlock(&dev_priv->rps.hw_lock); 6573 mutex_unlock(&dev_priv->rps.hw_lock);
@@ -6308,27 +6579,26 @@ static void intel_gen6_powersave_work(struct work_struct *work)
6308 struct drm_i915_private *dev_priv = 6579 struct drm_i915_private *dev_priv =
6309 container_of(work, struct drm_i915_private, 6580 container_of(work, struct drm_i915_private,
6310 rps.delayed_resume_work.work); 6581 rps.delayed_resume_work.work);
6311 struct drm_device *dev = dev_priv->dev;
6312 6582
6313 mutex_lock(&dev_priv->rps.hw_lock); 6583 mutex_lock(&dev_priv->rps.hw_lock);
6314 6584
6315 gen6_reset_rps_interrupts(dev); 6585 gen6_reset_rps_interrupts(dev_priv);
6316 6586
6317 if (IS_CHERRYVIEW(dev)) { 6587 if (IS_CHERRYVIEW(dev_priv)) {
6318 cherryview_enable_rps(dev); 6588 cherryview_enable_rps(dev_priv);
6319 } else if (IS_VALLEYVIEW(dev)) { 6589 } else if (IS_VALLEYVIEW(dev_priv)) {
6320 valleyview_enable_rps(dev); 6590 valleyview_enable_rps(dev_priv);
6321 } else if (INTEL_INFO(dev)->gen >= 9) { 6591 } else if (INTEL_INFO(dev_priv)->gen >= 9) {
6322 gen9_enable_rc6(dev); 6592 gen9_enable_rc6(dev_priv);
6323 gen9_enable_rps(dev); 6593 gen9_enable_rps(dev_priv);
6324 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) 6594 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
6325 __gen6_update_ring_freq(dev); 6595 __gen6_update_ring_freq(dev_priv);
6326 } else if (IS_BROADWELL(dev)) { 6596 } else if (IS_BROADWELL(dev_priv)) {
6327 gen8_enable_rps(dev); 6597 gen8_enable_rps(dev_priv);
6328 __gen6_update_ring_freq(dev); 6598 __gen6_update_ring_freq(dev_priv);
6329 } else { 6599 } else {
6330 gen6_enable_rps(dev); 6600 gen6_enable_rps(dev_priv);
6331 __gen6_update_ring_freq(dev); 6601 __gen6_update_ring_freq(dev_priv);
6332 } 6602 }
6333 6603
6334 WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq); 6604 WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
@@ -6339,27 +6609,25 @@ static void intel_gen6_powersave_work(struct work_struct *work)
6339 6609
6340 dev_priv->rps.enabled = true; 6610 dev_priv->rps.enabled = true;
6341 6611
6342 gen6_enable_rps_interrupts(dev); 6612 gen6_enable_rps_interrupts(dev_priv);
6343 6613
6344 mutex_unlock(&dev_priv->rps.hw_lock); 6614 mutex_unlock(&dev_priv->rps.hw_lock);
6345 6615
6346 intel_runtime_pm_put(dev_priv); 6616 intel_runtime_pm_put(dev_priv);
6347} 6617}
6348 6618
6349void intel_enable_gt_powersave(struct drm_device *dev) 6619void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
6350{ 6620{
6351 struct drm_i915_private *dev_priv = dev->dev_private;
6352
6353 /* Powersaving is controlled by the host when inside a VM */ 6621 /* Powersaving is controlled by the host when inside a VM */
6354 if (intel_vgpu_active(dev)) 6622 if (intel_vgpu_active(dev_priv))
6355 return; 6623 return;
6356 6624
6357 if (IS_IRONLAKE_M(dev)) { 6625 if (IS_IRONLAKE_M(dev_priv)) {
6358 ironlake_enable_drps(dev); 6626 ironlake_enable_drps(dev_priv);
6359 mutex_lock(&dev->struct_mutex); 6627 mutex_lock(&dev_priv->dev->struct_mutex);
6360 intel_init_emon(dev); 6628 intel_init_emon(dev_priv);
6361 mutex_unlock(&dev->struct_mutex); 6629 mutex_unlock(&dev_priv->dev->struct_mutex);
6362 } else if (INTEL_INFO(dev)->gen >= 6) { 6630 } else if (INTEL_INFO(dev_priv)->gen >= 6) {
6363 /* 6631 /*
6364 * PCU communication is slow and this doesn't need to be 6632 * PCU communication is slow and this doesn't need to be
6365 * done at any specific time, so do this out of our fast path 6633 * done at any specific time, so do this out of our fast path
@@ -6378,14 +6646,12 @@ void intel_enable_gt_powersave(struct drm_device *dev)
6378 } 6646 }
6379} 6647}
6380 6648
6381void intel_reset_gt_powersave(struct drm_device *dev) 6649void intel_reset_gt_powersave(struct drm_i915_private *dev_priv)
6382{ 6650{
6383 struct drm_i915_private *dev_priv = dev->dev_private; 6651 if (INTEL_INFO(dev_priv)->gen < 6)
6384
6385 if (INTEL_INFO(dev)->gen < 6)
6386 return; 6652 return;
6387 6653
6388 gen6_suspend_rps(dev); 6654 gen6_suspend_rps(dev_priv);
6389 dev_priv->rps.enabled = false; 6655 dev_priv->rps.enabled = false;
6390} 6656}
6391 6657
@@ -6698,11 +6964,69 @@ static void lpt_suspend_hw(struct drm_device *dev)
6698 } 6964 }
6699} 6965}
6700 6966
6967static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
6968 int general_prio_credits,
6969 int high_prio_credits)
6970{
6971 u32 misccpctl;
6972
6973 /* WaTempDisableDOPClkGating:bdw */
6974 misccpctl = I915_READ(GEN7_MISCCPCTL);
6975 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
6976
6977 I915_WRITE(GEN8_L3SQCREG1,
6978 L3_GENERAL_PRIO_CREDITS(general_prio_credits) |
6979 L3_HIGH_PRIO_CREDITS(high_prio_credits));
6980
6981 /*
6982 * Wait at least 100 clocks before re-enabling clock gating.
6983 * See the definition of L3SQCREG1 in BSpec.
6984 */
6985 POSTING_READ(GEN8_L3SQCREG1);
6986 udelay(1);
6987 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
6988}
6989
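
The new gen8_set_l3sqc_credits() above brackets the credit write between disabling and restoring DOP clock gating, with a posting read plus delay so the write lands before gating resumes. A hedged sketch of that bracket pattern, where read32()/write32() stand in for I915_READ/I915_WRITE and the register offsets and gating bit are placeholders, not BSpec values:

    #include <stdint.h>

    static uint32_t regs[1 << 16];               /* toy MMIO space */
    static uint32_t read32(uint32_t r)           { return regs[r]; }
    static void write32(uint32_t r, uint32_t v)  { regs[r] = v; }
    static void wait_clocks(unsigned int n)      { (void)n; /* udelay stand-in */ }

    #define MISCCPCTL_REG   0x9424      /* placeholder offsets for the sketch */
    #define L3SQCREG1_REG   0xb100
    #define DOP_GATE_ENABLE (1u << 0)   /* placeholder bit */

    /* Same bracket as gen8_set_l3sqc_credits(): disable DOP clock gating,
     * program the credits, flush with a posting read, wait the required
     * 100+ clocks, then restore the saved gating state. */
    static void set_l3sqc_credits(uint32_t credits)
    {
        uint32_t misccpctl = read32(MISCCPCTL_REG);

        write32(MISCCPCTL_REG, misccpctl & ~DOP_GATE_ENABLE);
        write32(L3SQCREG1_REG, credits);
        (void)read32(L3SQCREG1_REG);    /* posting read */
        wait_clocks(100);
        write32(MISCCPCTL_REG, misccpctl);
    }

The call sites in this patch then reduce to one line each: 30/2 credits for Broadwell, 38/2 for Cherryview, and 62/2 (open-coded) for Broxton B0+.
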
6990static void kabylake_init_clock_gating(struct drm_device *dev)
6991{
6992 struct drm_i915_private *dev_priv = dev->dev_private;
6993
6994 gen9_init_clock_gating(dev);
6995
6996 /* WaDisableSDEUnitClockGating:kbl */
6997 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
6998 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
6999 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
7000
7001 /* WaDisableGamClockGating:kbl */
7002 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
7003 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
7004 GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
7005
7006 /* WaFbcNukeOnHostModify:kbl */
7007 I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
7008 ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
7009}
7010
7011static void skylake_init_clock_gating(struct drm_device *dev)
7012{
7013 struct drm_i915_private *dev_priv = dev->dev_private;
7014
7015 gen9_init_clock_gating(dev);
7016
7017 /* WAC6entrylatency:skl */
7018 I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
7019 FBC_LLC_FULLY_OPEN);
7020
7021 /* WaFbcNukeOnHostModify:skl */
7022 I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
7023 ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
7024}
7025
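
Both new hooks above, like the broadwell one below, are built from a single idiom: read a register, OR in the workaround bits, and write the result back so existing contents survive. A minimal sketch of that read-modify-write helper, with stand-ins for I915_READ/I915_WRITE:

    #include <stdint.h>

    static uint32_t regs[1 << 16];          /* toy MMIO space for the sketch */

    static uint32_t mmio_read(uint32_t reg)            { return regs[reg]; }
    static void mmio_write(uint32_t reg, uint32_t val) { regs[reg] = val; }

    /* The recurring idiom in the clock-gating hooks: keep the register's
     * current contents and OR in only the workaround bits, as in
     * I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | ..._DISABLE). */
    static void set_bits(uint32_t reg, uint32_t bits)
    {
        mmio_write(reg, mmio_read(reg) | bits);
    }
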
6701static void broadwell_init_clock_gating(struct drm_device *dev) 7026static void broadwell_init_clock_gating(struct drm_device *dev)
6702{ 7027{
6703 struct drm_i915_private *dev_priv = dev->dev_private; 7028 struct drm_i915_private *dev_priv = dev->dev_private;
6704 enum pipe pipe; 7029 enum pipe pipe;
6705 uint32_t misccpctl;
6706 7030
6707 ilk_init_lp_watermarks(dev); 7031 ilk_init_lp_watermarks(dev);
6708 7032
@@ -6733,20 +7057,8 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
6733 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | 7057 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
6734 GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 7058 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
6735 7059
6736 /* 7060 /* WaProgramL3SqcReg1Default:bdw */
6737 * WaProgramL3SqcReg1Default:bdw 7061 gen8_set_l3sqc_credits(dev_priv, 30, 2);
6738 * WaTempDisableDOPClkGating:bdw
6739 */
6740 misccpctl = I915_READ(GEN7_MISCCPCTL);
6741 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
6742 I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT);
6743 /*
6744 * Wait at least 100 clocks before re-enabling clock gating. See
6745 * the definition of L3SQCREG1 in BSpec.
6746 */
6747 POSTING_READ(GEN8_L3SQCREG1);
6748 udelay(1);
6749 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
6750 7062
6751 /* 7063 /*
6752 * WaGttCachingOffByDefault:bdw 7064 * WaGttCachingOffByDefault:bdw
@@ -6755,6 +7067,10 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
6755 */ 7067 */
6756 I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL); 7068 I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
6757 7069
7070 /* WaKVMNotificationOnConfigChange:bdw */
7071 I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
7072 | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);
7073
6758 lpt_init_clock_gating(dev); 7074 lpt_init_clock_gating(dev);
6759} 7075}
6760 7076
@@ -7017,6 +7333,13 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
7017 GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 7333 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
7018 7334
7019 /* 7335 /*
7336 * WaProgramL3SqcReg1Default:chv
7337 * See gfxspecs/Related Documents/Performance Guide/
7338 * LSQC Setting Recommendations.
7339 */
7340 gen8_set_l3sqc_credits(dev_priv, 38, 2);
7341
7342 /*
7020 * GTT cache may not work with big pages, so if those 7343 * GTT cache may not work with big pages, so if those
7021 * are ever enabled GTT cache may need to be disabled. 7344 * are ever enabled GTT cache may need to be disabled.
7022 */ 7345 */
@@ -7163,9 +7486,9 @@ static void nop_init_clock_gating(struct drm_device *dev)
7163void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) 7486void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
7164{ 7487{
7165 if (IS_SKYLAKE(dev_priv)) 7488 if (IS_SKYLAKE(dev_priv))
7166 dev_priv->display.init_clock_gating = nop_init_clock_gating; 7489 dev_priv->display.init_clock_gating = skylake_init_clock_gating;
7167 else if (IS_KABYLAKE(dev_priv)) 7490 else if (IS_KABYLAKE(dev_priv))
7168 dev_priv->display.init_clock_gating = nop_init_clock_gating; 7491 dev_priv->display.init_clock_gating = kabylake_init_clock_gating;
7169 else if (IS_BROXTON(dev_priv)) 7492 else if (IS_BROXTON(dev_priv))
7170 dev_priv->display.init_clock_gating = bxt_init_clock_gating; 7493 dev_priv->display.init_clock_gating = bxt_init_clock_gating;
7171 else if (IS_BROADWELL(dev_priv)) 7494 else if (IS_BROADWELL(dev_priv))
@@ -7217,6 +7540,7 @@ void intel_init_pm(struct drm_device *dev)
7217 if (INTEL_INFO(dev)->gen >= 9) { 7540 if (INTEL_INFO(dev)->gen >= 9) {
7218 skl_setup_wm_latency(dev); 7541 skl_setup_wm_latency(dev);
7219 dev_priv->display.update_wm = skl_update_wm; 7542 dev_priv->display.update_wm = skl_update_wm;
7543 dev_priv->display.compute_global_watermarks = skl_compute_wm;
7220 } else if (HAS_PCH_SPLIT(dev)) { 7544 } else if (HAS_PCH_SPLIT(dev)) {
7221 ilk_setup_wm_latency(dev); 7545 ilk_setup_wm_latency(dev);
7222 7546
@@ -7390,19 +7714,17 @@ static void __intel_rps_boost_work(struct work_struct *work)
7390 struct drm_i915_gem_request *req = boost->req; 7714 struct drm_i915_gem_request *req = boost->req;
7391 7715
7392 if (!i915_gem_request_completed(req, true)) 7716 if (!i915_gem_request_completed(req, true))
7393 gen6_rps_boost(to_i915(req->engine->dev), NULL, 7717 gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);
7394 req->emitted_jiffies);
7395 7718
7396 i915_gem_request_unreference__unlocked(req); 7719 i915_gem_request_unreference(req);
7397 kfree(boost); 7720 kfree(boost);
7398} 7721}
7399 7722
7400void intel_queue_rps_boost_for_request(struct drm_device *dev, 7723void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
7401 struct drm_i915_gem_request *req)
7402{ 7724{
7403 struct request_boost *boost; 7725 struct request_boost *boost;
7404 7726
7405 if (req == NULL || INTEL_INFO(dev)->gen < 6) 7727 if (req == NULL || INTEL_GEN(req->i915) < 6)
7406 return; 7728 return;
7407 7729
7408 if (i915_gem_request_completed(req, true)) 7730 if (i915_gem_request_completed(req, true))
@@ -7416,7 +7738,7 @@ void intel_queue_rps_boost_for_request(struct drm_device *dev,
7416 boost->req = req; 7738 boost->req = req;
7417 7739
7418 INIT_WORK(&boost->work, __intel_rps_boost_work); 7740 INIT_WORK(&boost->work, __intel_rps_boost_work);
7419 queue_work(to_i915(dev)->wq, &boost->work); 7741 queue_work(req->i915->wq, &boost->work);
7420} 7742}
7421 7743
7422void intel_pm_setup(struct drm_device *dev) 7744void intel_pm_setup(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index a788d1e9589b..29a09bf6bd18 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -176,7 +176,6 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
176 struct drm_i915_private *dev_priv = dev->dev_private; 176 struct drm_i915_private *dev_priv = dev->dev_private;
177 uint32_t aux_clock_divider; 177 uint32_t aux_clock_divider;
178 i915_reg_t aux_ctl_reg; 178 i915_reg_t aux_ctl_reg;
179 int precharge = 0x3;
180 static const uint8_t aux_msg[] = { 179 static const uint8_t aux_msg[] = {
181 [0] = DP_AUX_NATIVE_WRITE << 4, 180 [0] = DP_AUX_NATIVE_WRITE << 4,
182 [1] = DP_SET_POWER >> 8, 181 [1] = DP_SET_POWER >> 8,
@@ -185,6 +184,7 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
185 [4] = DP_SET_POWER_D0, 184 [4] = DP_SET_POWER_D0,
186 }; 185 };
187 enum port port = dig_port->port; 186 enum port port = dig_port->port;
187 u32 aux_ctl;
188 int i; 188 int i;
189 189
190 BUILD_BUG_ON(sizeof(aux_msg) > 20); 190 BUILD_BUG_ON(sizeof(aux_msg) > 20);
@@ -197,6 +197,13 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
197 DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF, 197 DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
198 DP_AUX_FRAME_SYNC_ENABLE); 198 DP_AUX_FRAME_SYNC_ENABLE);
199 199
200 if (dev_priv->psr.link_standby)
201 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
202 DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
203 else
204 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
205 DP_PSR_ENABLE);
206
200 aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port); 207 aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port);
201 208
202 /* Setup AUX registers */ 209 /* Setup AUX registers */
@@ -204,33 +211,9 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
204 I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2), 211 I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2),
205 intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i)); 212 intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
206 213
207 if (INTEL_INFO(dev)->gen >= 9) { 214 aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, 0, sizeof(aux_msg),
208 uint32_t val; 215 aux_clock_divider);
209 216 I915_WRITE(aux_ctl_reg, aux_ctl);
210 val = I915_READ(aux_ctl_reg);
211 val &= ~DP_AUX_CH_CTL_TIME_OUT_MASK;
212 val |= DP_AUX_CH_CTL_TIME_OUT_1600us;
213 val &= ~DP_AUX_CH_CTL_MESSAGE_SIZE_MASK;
214 val |= (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
215 /* Use hardcoded data values for PSR, frame sync and GTC */
216 val &= ~DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL;
217 val &= ~DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL;
218 val &= ~DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL;
219 I915_WRITE(aux_ctl_reg, val);
220 } else {
221 I915_WRITE(aux_ctl_reg,
222 DP_AUX_CH_CTL_TIME_OUT_400us |
223 (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
224 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
225 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
226 }
227
228 if (dev_priv->psr.link_standby)
229 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
230 DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
231 else
232 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
233 DP_PSR_ENABLE);
234} 217}
235 218
236static void vlv_psr_enable_source(struct intel_dp *intel_dp) 219static void vlv_psr_enable_source(struct intel_dp *intel_dp)
@@ -272,14 +255,14 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
272 struct drm_i915_private *dev_priv = dev->dev_private; 255 struct drm_i915_private *dev_priv = dev->dev_private;
273 256
274 uint32_t max_sleep_time = 0x1f; 257 uint32_t max_sleep_time = 0x1f;
275 /* 258 /* It was recently identified that, depending on the panel, the idle
276 * Let's respect VBT in case VBT asks a higher idle_frame value. 259 * frame count calculated in HW can be off by 1, so use what came
277 * Let's use 6 as the minimum to cover all known cases including 260 * from VBT + 1.
278 * the off-by-one issue that HW has in some cases. Also there are 261 * There are also cases where the panel demands at least 4 but VBT
279 * cases where sink should be able to train 262 * is not set. To cover both cases, use at least 5 when VBT isn't
280 * with the 5 or 6 idle patterns. 263 * set, to be on the safe side.
281 */ 264 */
282 uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames); 265 uint32_t idle_frames = dev_priv->vbt.psr.idle_frames + 1;
283 uint32_t val = EDP_PSR_ENABLE; 266 uint32_t val = EDP_PSR_ENABLE;
284 267
285 val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT; 268 val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
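
The rewritten comment above boils down to the single expression idle_frames = VBT + 1; the "at least 5" guarantee it mentions presumably relies on the VBT parser leaving a floor of 4 when the table is absent. A sketch of the resulting selection, where the floor of 4 is an assumption read out of the comment rather than code visible in this diff:

    /* Idle-frame selection described in the comment above. vbt < 0 models
     * "VBT not set"; the floor of 4 (so 4 + 1 = 5 frames programmed) is
     * inferred from the comment, not from code shown in this diff. */
    static unsigned int psr_idle_frames(int vbt_idle_frames)
    {
        int vbt = vbt_idle_frames >= 4 ? vbt_idle_frames : 4;

        return (unsigned int)vbt + 1; /* HW count can be off by one */
    }
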
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 04402bb9d26b..fedd27049814 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -34,6 +34,11 @@
34#include "i915_trace.h" 34#include "i915_trace.h"
35#include "intel_drv.h" 35#include "intel_drv.h"
36 36
37/* Rough estimate of the typical request size, performing a flush,
38 * set-context and then emitting the batch.
39 */
40#define LEGACY_REQUEST_SIZE 200
41
37int __intel_ring_space(int head, int tail, int size) 42int __intel_ring_space(int head, int tail, int size)
38{ 43{
39 int space = head - tail; 44 int space = head - tail;
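
The hunks below thread engine->i915 through the ring code, but the arithmetic they sit on is unchanged: __intel_ring_space() treats the ring as a circular buffer. A self-contained sketch of the full computation; RESERVED stands in for the driver's I915_RING_FREE_SPACE constant and its value here is illustrative:

    /* Circular-buffer arithmetic of __intel_ring_space() above: head == tail
     * means empty, so once head wraps behind tail the difference goes
     * negative and the ring size is added back. The driver then keeps a
     * small reservation (I915_RING_FREE_SPACE); RESERVED is a stand-in. */
    #define RESERVED 64 /* illustrative, not the driver's constant */

    static int ring_space(int head, int tail, int size)
    {
        int space = head - tail;

        if (space <= 0)
            space += size;

        return space - RESERVED;
    }
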
@@ -55,7 +60,7 @@ void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
55 60
56bool intel_engine_stopped(struct intel_engine_cs *engine) 61bool intel_engine_stopped(struct intel_engine_cs *engine)
57{ 62{
58 struct drm_i915_private *dev_priv = engine->dev->dev_private; 63 struct drm_i915_private *dev_priv = engine->i915;
59 return dev_priv->gpu_error.stop_rings & intel_engine_flag(engine); 64 return dev_priv->gpu_error.stop_rings & intel_engine_flag(engine);
60} 65}
61 66
@@ -101,7 +106,6 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
101 u32 flush_domains) 106 u32 flush_domains)
102{ 107{
103 struct intel_engine_cs *engine = req->engine; 108 struct intel_engine_cs *engine = req->engine;
104 struct drm_device *dev = engine->dev;
105 u32 cmd; 109 u32 cmd;
106 int ret; 110 int ret;
107 111
@@ -140,7 +144,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
140 cmd |= MI_EXE_FLUSH; 144 cmd |= MI_EXE_FLUSH;
141 145
142 if (invalidate_domains & I915_GEM_DOMAIN_COMMAND && 146 if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
143 (IS_G4X(dev) || IS_GEN5(dev))) 147 (IS_G4X(req->i915) || IS_GEN5(req->i915)))
144 cmd |= MI_INVALIDATE_ISP; 148 cmd |= MI_INVALIDATE_ISP;
145 149
146 ret = intel_ring_begin(req, 2); 150 ret = intel_ring_begin(req, 2);
@@ -426,19 +430,19 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
426static void ring_write_tail(struct intel_engine_cs *engine, 430static void ring_write_tail(struct intel_engine_cs *engine,
427 u32 value) 431 u32 value)
428{ 432{
429 struct drm_i915_private *dev_priv = engine->dev->dev_private; 433 struct drm_i915_private *dev_priv = engine->i915;
430 I915_WRITE_TAIL(engine, value); 434 I915_WRITE_TAIL(engine, value);
431} 435}
432 436
433u64 intel_ring_get_active_head(struct intel_engine_cs *engine) 437u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
434{ 438{
435 struct drm_i915_private *dev_priv = engine->dev->dev_private; 439 struct drm_i915_private *dev_priv = engine->i915;
436 u64 acthd; 440 u64 acthd;
437 441
438 if (INTEL_INFO(engine->dev)->gen >= 8) 442 if (INTEL_GEN(dev_priv) >= 8)
439 acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base), 443 acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
440 RING_ACTHD_UDW(engine->mmio_base)); 444 RING_ACTHD_UDW(engine->mmio_base));
441 else if (INTEL_INFO(engine->dev)->gen >= 4) 445 else if (INTEL_GEN(dev_priv) >= 4)
442 acthd = I915_READ(RING_ACTHD(engine->mmio_base)); 446 acthd = I915_READ(RING_ACTHD(engine->mmio_base));
443 else 447 else
444 acthd = I915_READ(ACTHD); 448 acthd = I915_READ(ACTHD);
@@ -448,25 +452,24 @@ u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
448 452
449static void ring_setup_phys_status_page(struct intel_engine_cs *engine) 453static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
450{ 454{
451 struct drm_i915_private *dev_priv = engine->dev->dev_private; 455 struct drm_i915_private *dev_priv = engine->i915;
452 u32 addr; 456 u32 addr;
453 457
454 addr = dev_priv->status_page_dmah->busaddr; 458 addr = dev_priv->status_page_dmah->busaddr;
455 if (INTEL_INFO(engine->dev)->gen >= 4) 459 if (INTEL_GEN(dev_priv) >= 4)
456 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; 460 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
457 I915_WRITE(HWS_PGA, addr); 461 I915_WRITE(HWS_PGA, addr);
458} 462}
459 463
460static void intel_ring_setup_status_page(struct intel_engine_cs *engine) 464static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
461{ 465{
462 struct drm_device *dev = engine->dev; 466 struct drm_i915_private *dev_priv = engine->i915;
463 struct drm_i915_private *dev_priv = engine->dev->dev_private;
464 i915_reg_t mmio; 467 i915_reg_t mmio;
465 468
466 /* The ring status page addresses are no longer next to the rest of 469 /* The ring status page addresses are no longer next to the rest of
467 * the ring registers as of gen7. 470 * the ring registers as of gen7.
468 */ 471 */
469 if (IS_GEN7(dev)) { 472 if (IS_GEN7(dev_priv)) {
470 switch (engine->id) { 473 switch (engine->id) {
471 case RCS: 474 case RCS:
472 mmio = RENDER_HWS_PGA_GEN7; 475 mmio = RENDER_HWS_PGA_GEN7;
@@ -486,7 +489,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
486 mmio = VEBOX_HWS_PGA_GEN7; 489 mmio = VEBOX_HWS_PGA_GEN7;
487 break; 490 break;
488 } 491 }
489 } else if (IS_GEN6(engine->dev)) { 492 } else if (IS_GEN6(dev_priv)) {
490 mmio = RING_HWS_PGA_GEN6(engine->mmio_base); 493 mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
491 } else { 494 } else {
492 /* XXX: gen8 returns to sanity */ 495 /* XXX: gen8 returns to sanity */
@@ -503,7 +506,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
503 * arises: do we still need this and if so how should we go about 506 * arises: do we still need this and if so how should we go about
504 * invalidating the TLB? 507 * invalidating the TLB?
505 */ 508 */
506 if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) { 509 if (IS_GEN(dev_priv, 6, 7)) {
507 i915_reg_t reg = RING_INSTPM(engine->mmio_base); 510 i915_reg_t reg = RING_INSTPM(engine->mmio_base);
508 511
509 /* ring should be idle before issuing a sync flush */ 512
@@ -521,9 +524,9 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
521 524
522static bool stop_ring(struct intel_engine_cs *engine) 525static bool stop_ring(struct intel_engine_cs *engine)
523{ 526{
524 struct drm_i915_private *dev_priv = to_i915(engine->dev); 527 struct drm_i915_private *dev_priv = engine->i915;
525 528
526 if (!IS_GEN2(engine->dev)) { 529 if (!IS_GEN2(dev_priv)) {
527 I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING)); 530 I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
528 if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) { 531 if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
529 DRM_ERROR("%s : timed out trying to stop ring\n", 532 DRM_ERROR("%s : timed out trying to stop ring\n",
@@ -541,7 +544,7 @@ static bool stop_ring(struct intel_engine_cs *engine)
541 I915_WRITE_HEAD(engine, 0); 544 I915_WRITE_HEAD(engine, 0);
542 engine->write_tail(engine, 0); 545 engine->write_tail(engine, 0);
543 546
544 if (!IS_GEN2(engine->dev)) { 547 if (!IS_GEN2(dev_priv)) {
545 (void)I915_READ_CTL(engine); 548 (void)I915_READ_CTL(engine);
546 I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING)); 549 I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
547 } 550 }
@@ -556,8 +559,7 @@ void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
556 559
557static int init_ring_common(struct intel_engine_cs *engine) 560static int init_ring_common(struct intel_engine_cs *engine)
558{ 561{
559 struct drm_device *dev = engine->dev; 562 struct drm_i915_private *dev_priv = engine->i915;
560 struct drm_i915_private *dev_priv = dev->dev_private;
561 struct intel_ringbuffer *ringbuf = engine->buffer; 563 struct intel_ringbuffer *ringbuf = engine->buffer;
562 struct drm_i915_gem_object *obj = ringbuf->obj; 564 struct drm_i915_gem_object *obj = ringbuf->obj;
563 int ret = 0; 565 int ret = 0;
@@ -587,7 +589,7 @@ static int init_ring_common(struct intel_engine_cs *engine)
587 } 589 }
588 } 590 }
589 591
590 if (I915_NEED_GFX_HWS(dev)) 592 if (I915_NEED_GFX_HWS(dev_priv))
591 intel_ring_setup_status_page(engine); 593 intel_ring_setup_status_page(engine);
592 else 594 else
593 ring_setup_phys_status_page(engine); 595 ring_setup_phys_status_page(engine);
@@ -644,12 +646,10 @@ out:
644void 646void
645intel_fini_pipe_control(struct intel_engine_cs *engine) 647intel_fini_pipe_control(struct intel_engine_cs *engine)
646{ 648{
647 struct drm_device *dev = engine->dev;
648
649 if (engine->scratch.obj == NULL) 649 if (engine->scratch.obj == NULL)
650 return; 650 return;
651 651
652 if (INTEL_INFO(dev)->gen >= 5) { 652 if (INTEL_GEN(engine->i915) >= 5) {
653 kunmap(sg_page(engine->scratch.obj->pages->sgl)); 653 kunmap(sg_page(engine->scratch.obj->pages->sgl));
654 i915_gem_object_ggtt_unpin(engine->scratch.obj); 654 i915_gem_object_ggtt_unpin(engine->scratch.obj);
655 } 655 }
@@ -665,10 +665,11 @@ intel_init_pipe_control(struct intel_engine_cs *engine)
665 665
666 WARN_ON(engine->scratch.obj); 666 WARN_ON(engine->scratch.obj);
667 667
668 engine->scratch.obj = i915_gem_alloc_object(engine->dev, 4096); 668 engine->scratch.obj = i915_gem_object_create(engine->i915->dev, 4096);
669 if (engine->scratch.obj == NULL) { 669 if (IS_ERR(engine->scratch.obj)) {
670 DRM_ERROR("Failed to allocate seqno page\n"); 670 DRM_ERROR("Failed to allocate seqno page\n");
671 ret = -ENOMEM; 671 ret = PTR_ERR(engine->scratch.obj);
672 engine->scratch.obj = NULL;
672 goto err; 673 goto err;
673 } 674 }
674 675
@@ -702,11 +703,9 @@ err:
702 703
703static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req) 704static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
704{ 705{
705 int ret, i;
706 struct intel_engine_cs *engine = req->engine; 706 struct intel_engine_cs *engine = req->engine;
707 struct drm_device *dev = engine->dev; 707 struct i915_workarounds *w = &req->i915->workarounds;
708 struct drm_i915_private *dev_priv = dev->dev_private; 708 int ret, i;
709 struct i915_workarounds *w = &dev_priv->workarounds;
710 709
711 if (w->count == 0) 710 if (w->count == 0)
712 return 0; 711 return 0;
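
The workaround table emitted above is applied with plain register writes because most of the targets are "masked" registers: the upper 16 bits of the written value name the bits being changed, the lower 16 carry the new values, so a single write updates selected bits without a read-modify-write cycle. A sketch of that encoding, matching the _MASKED_BIT_ENABLE/_MASKED_BIT_DISABLE convention that the WA_SET_BIT_MASKED()/WA_CLR_BIT_MASKED() calls below expand to:

    #include <stdint.h>

    /* Masked-register encoding used by the workaround writes below. */
    #define MASKED_FIELD(mask, value) (((uint32_t)(mask) << 16) | (value))
    #define MASKED_BIT_ENABLE(a)      MASKED_FIELD((a), (a))
    #define MASKED_BIT_DISABLE(a)     MASKED_FIELD((a), 0)

    /* Example: WA_SET_BIT_MASKED(reg, bit) ends up writing this value. */
    static uint32_t wa_set_bit_masked_value(uint16_t bit)
    {
        return MASKED_BIT_ENABLE(bit);
    }
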
@@ -795,7 +794,7 @@ static int wa_add(struct drm_i915_private *dev_priv,
795static int wa_ring_whitelist_reg(struct intel_engine_cs *engine, 794static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
796 i915_reg_t reg) 795 i915_reg_t reg)
797{ 796{
798 struct drm_i915_private *dev_priv = engine->dev->dev_private; 797 struct drm_i915_private *dev_priv = engine->i915;
799 struct i915_workarounds *wa = &dev_priv->workarounds; 798 struct i915_workarounds *wa = &dev_priv->workarounds;
800 const uint32_t index = wa->hw_whitelist_count[engine->id]; 799 const uint32_t index = wa->hw_whitelist_count[engine->id];
801 800
@@ -811,8 +810,7 @@ static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
811 810
812static int gen8_init_workarounds(struct intel_engine_cs *engine) 811static int gen8_init_workarounds(struct intel_engine_cs *engine)
813{ 812{
814 struct drm_device *dev = engine->dev; 813 struct drm_i915_private *dev_priv = engine->i915;
815 struct drm_i915_private *dev_priv = dev->dev_private;
816 814
817 WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING); 815 WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
818 816
@@ -863,9 +861,8 @@ static int gen8_init_workarounds(struct intel_engine_cs *engine)
863 861
864static int bdw_init_workarounds(struct intel_engine_cs *engine) 862static int bdw_init_workarounds(struct intel_engine_cs *engine)
865{ 863{
864 struct drm_i915_private *dev_priv = engine->i915;
866 int ret; 865 int ret;
867 struct drm_device *dev = engine->dev;
868 struct drm_i915_private *dev_priv = dev->dev_private;
869 866
870 ret = gen8_init_workarounds(engine); 867 ret = gen8_init_workarounds(engine);
871 if (ret) 868 if (ret)
@@ -885,16 +882,15 @@ static int bdw_init_workarounds(struct intel_engine_cs *engine)
885 /* WaForceContextSaveRestoreNonCoherent:bdw */ 882 /* WaForceContextSaveRestoreNonCoherent:bdw */
886 HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT | 883 HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
887 /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */ 884 /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
888 (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0)); 885 (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
889 886
890 return 0; 887 return 0;
891} 888}
892 889
893static int chv_init_workarounds(struct intel_engine_cs *engine) 890static int chv_init_workarounds(struct intel_engine_cs *engine)
894{ 891{
892 struct drm_i915_private *dev_priv = engine->i915;
895 int ret; 893 int ret;
896 struct drm_device *dev = engine->dev;
897 struct drm_i915_private *dev_priv = dev->dev_private;
898 894
899 ret = gen8_init_workarounds(engine); 895 ret = gen8_init_workarounds(engine);
900 if (ret) 896 if (ret)
@@ -911,38 +907,39 @@ static int chv_init_workarounds(struct intel_engine_cs *engine)
911 907
912static int gen9_init_workarounds(struct intel_engine_cs *engine) 908static int gen9_init_workarounds(struct intel_engine_cs *engine)
913{ 909{
914 struct drm_device *dev = engine->dev; 910 struct drm_i915_private *dev_priv = engine->i915;
915 struct drm_i915_private *dev_priv = dev->dev_private;
916 uint32_t tmp;
917 int ret; 911 int ret;
918 912
919 /* WaEnableLbsSlaRetryTimerDecrement:skl */ 913 /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */
914 I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
915
916 /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl */
920 I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) | 917 I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
921 GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE); 918 GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
922 919
923 /* WaDisableKillLogic:bxt,skl */ 920 /* WaDisableKillLogic:bxt,skl,kbl */
924 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | 921 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
925 ECOCHK_DIS_TLB); 922 ECOCHK_DIS_TLB);
926 923
927 /* WaClearFlowControlGpgpuContextSave:skl,bxt */ 924 /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl */
928 /* WaDisablePartialInstShootdown:skl,bxt */ 925 /* WaDisablePartialInstShootdown:skl,bxt,kbl */
929 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, 926 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
930 FLOW_CONTROL_ENABLE | 927 FLOW_CONTROL_ENABLE |
931 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE); 928 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
932 929
933 /* Syncing dependencies between camera and graphics:skl,bxt */ 930 /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
934 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, 931 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
935 GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC); 932 GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
936 933
937 /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */ 934 /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
938 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || 935 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
939 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) 936 IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
940 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5, 937 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
941 GEN9_DG_MIRROR_FIX_ENABLE); 938 GEN9_DG_MIRROR_FIX_ENABLE);
942 939
943 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */ 940 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
944 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || 941 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
945 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { 942 IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
946 WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1, 943 WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
947 GEN9_RHWO_OPTIMIZATION_DISABLE); 944 GEN9_RHWO_OPTIMIZATION_DISABLE);
948 /* 945 /*
@@ -952,52 +949,78 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
952 */ 949 */
953 } 950 }
954 951
955 /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */ 952 /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl */
956 /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt */ 953 /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
957 WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7, 954 WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
958 GEN9_ENABLE_YV12_BUGFIX | 955 GEN9_ENABLE_YV12_BUGFIX |
959 GEN9_ENABLE_GPGPU_PREEMPTION); 956 GEN9_ENABLE_GPGPU_PREEMPTION);
960 957
961 /* Wa4x4STCOptimizationDisable:skl,bxt */ 958 /* Wa4x4STCOptimizationDisable:skl,bxt,kbl */
962 /* WaDisablePartialResolveInVc:skl,bxt */ 959 /* WaDisablePartialResolveInVc:skl,bxt,kbl */
963 WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE | 960 WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
964 GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE)); 961 GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));
965 962
966 /* WaCcsTlbPrefetchDisable:skl,bxt */ 963 /* WaCcsTlbPrefetchDisable:skl,bxt,kbl */
967 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5, 964 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
968 GEN9_CCS_TLB_PREFETCH_ENABLE); 965 GEN9_CCS_TLB_PREFETCH_ENABLE);
969 966
970 /* WaDisableMaskBasedCammingInRCC:skl,bxt */ 967 /* WaDisableMaskBasedCammingInRCC:skl,bxt */
971 if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_C0) || 968 if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_C0) ||
972 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) 969 IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
973 WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0, 970 WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
974 PIXEL_MASK_CAMMING_DISABLE); 971 PIXEL_MASK_CAMMING_DISABLE);
975 972
976 /* WaForceContextSaveRestoreNonCoherent:skl,bxt */ 973 /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */
977 tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT; 974 WA_SET_BIT_MASKED(HDC_CHICKEN0,
978 if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) || 975 HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
979 IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER)) 976 HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
980 tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE; 977
981 WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp); 978 /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
979 * both tied to WaForceContextSaveRestoreNonCoherent
980 * in some hsds for skl. We keep the tie for all gen9. The
981 * documentation is a bit hazy and so we want to get common behaviour,
982 * even though there is no clear evidence we would need both on kbl/bxt.
983 * This area has been a source of system hangs, so we play it safe
984 * and mimic the skl regardless of what bspec says.
985 *
986 * Use Force Non-Coherent whenever executing a 3D context. This
987 * is a workaround for a possible hang in the unlikely event
988 * a TLB invalidation occurs during a PSD flush.
989 */
990
991 /* WaForceEnableNonCoherent:skl,bxt,kbl */
992 WA_SET_BIT_MASKED(HDC_CHICKEN0,
993 HDC_FORCE_NON_COHERENT);
982 994
983 /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */ 995 /* WaDisableHDCInvalidation:skl,bxt,kbl */
984 if (IS_SKYLAKE(dev) || IS_BXT_REVID(dev, 0, BXT_REVID_B0)) 996 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
997 BDW_DISABLE_HDC_INVALIDATION);
998
999 /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */
1000 if (IS_SKYLAKE(dev_priv) ||
1001 IS_KABYLAKE(dev_priv) ||
1002 IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
985 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, 1003 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
986 GEN8_SAMPLER_POWER_BYPASS_DIS); 1004 GEN8_SAMPLER_POWER_BYPASS_DIS);
987 1005
988 /* WaDisableSTUnitPowerOptimization:skl,bxt */ 1006 /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl */
989 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE); 1007 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
990 1008
991 /* WaOCLCoherentLineFlush:skl,bxt */ 1009 /* WaOCLCoherentLineFlush:skl,bxt,kbl */
992 I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) | 1010 I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
993 GEN8_LQSC_FLUSH_COHERENT_LINES)); 1011 GEN8_LQSC_FLUSH_COHERENT_LINES));
994 1012
995 /* WaEnablePreemptionGranularityControlByUMD:skl,bxt */ 1013 /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt */
1014 ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
1015 if (ret)
1016 return ret;
1017
1018 /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */
996 ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1); 1019 ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
997 if (ret) 1020 if (ret)
998 return ret; 1021 return ret;
999 1022
1000 /* WaAllowUMDToModifyHDCChicken1:skl,bxt */ 1023 /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl */
1001 ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1); 1024 ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
1002 if (ret) 1025 if (ret)
1003 return ret; 1026 return ret;
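
The stepping-gated workarounds in this function and in the platform hooks below all funnel through IS_SKL_REVID()/IS_BXT_REVID()/IS_KBL_REVID(), inclusive revision-range checks where REVID_FOREVER (0xff in the driver convention) leaves the upper bound open. A minimal sketch of the shape:

    #include <stdbool.h>
    #include <stdint.h>

    #define REVID_FOREVER 0xff /* open-ended upper bound, as in the driver */

    /* Inclusive range check in the style of IS_SKL_REVID(p, since, until):
     * (0, SKL_REVID_B0) covers steppings A0..B0, while
     * (SKL_REVID_C0, REVID_FOREVER) covers C0 and everything after. */
    static bool revid_in_range(uint8_t revid, uint8_t since, uint8_t until)
    {
        return revid >= since && revid <= until;
    }
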
@@ -1007,8 +1030,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
1007 1030
1008static int skl_tune_iz_hashing(struct intel_engine_cs *engine) 1031static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
1009{ 1032{
1010 struct drm_device *dev = engine->dev; 1033 struct drm_i915_private *dev_priv = engine->i915;
1011 struct drm_i915_private *dev_priv = dev->dev_private;
1012 u8 vals[3] = { 0, 0, 0 }; 1034 u8 vals[3] = { 0, 0, 0 };
1013 unsigned int i; 1035 unsigned int i;
1014 1036
@@ -1049,9 +1071,8 @@ static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
1049 1071
1050static int skl_init_workarounds(struct intel_engine_cs *engine) 1072static int skl_init_workarounds(struct intel_engine_cs *engine)
1051{ 1073{
1074 struct drm_i915_private *dev_priv = engine->i915;
1052 int ret; 1075 int ret;
1053 struct drm_device *dev = engine->dev;
1054 struct drm_i915_private *dev_priv = dev->dev_private;
1055 1076
1056 ret = gen9_init_workarounds(engine); 1077 ret = gen9_init_workarounds(engine);
1057 if (ret) 1078 if (ret)
@@ -1062,12 +1083,12 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
1062 * until D0 which is the default case so this is equivalent to 1083 * until D0 which is the default case so this is equivalent to
1063 * !WaDisablePerCtxtPreemptionGranularityControl:skl 1084 * !WaDisablePerCtxtPreemptionGranularityControl:skl
1064 */ 1085 */
1065 if (IS_SKL_REVID(dev, SKL_REVID_E0, REVID_FOREVER)) { 1086 if (IS_SKL_REVID(dev_priv, SKL_REVID_E0, REVID_FOREVER)) {
1066 I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1, 1087 I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
1067 _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL)); 1088 _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
1068 } 1089 }
1069 1090
1070 if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) { 1091 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0)) {
1071 /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */ 1092 /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
1072 I915_WRITE(FF_SLICE_CS_CHICKEN2, 1093 I915_WRITE(FF_SLICE_CS_CHICKEN2,
1073 _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE)); 1094 _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
@@ -1076,50 +1097,37 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
1076 /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes 1097 /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
1077 * involving this register should also be added to WA batch as required. 1098 * involving this register should also be added to WA batch as required.
1078 */ 1099 */
1079 if (IS_SKL_REVID(dev, 0, SKL_REVID_E0)) 1100 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0))
1080 /* WaDisableLSQCROPERFforOCL:skl */ 1101 /* WaDisableLSQCROPERFforOCL:skl */
1081 I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | 1102 I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
1082 GEN8_LQSC_RO_PERF_DIS); 1103 GEN8_LQSC_RO_PERF_DIS);
1083 1104
1084 /* WaEnableGapsTsvCreditFix:skl */ 1105 /* WaEnableGapsTsvCreditFix:skl */
1085 if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER)) { 1106 if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, REVID_FOREVER)) {
1086 I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) | 1107 I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
1087 GEN9_GAPS_TSV_CREDIT_DISABLE)); 1108 GEN9_GAPS_TSV_CREDIT_DISABLE));
1088 } 1109 }
1089 1110
1090 /* WaDisablePowerCompilerClockGating:skl */ 1111 /* WaDisablePowerCompilerClockGating:skl */
1091 if (IS_SKL_REVID(dev, SKL_REVID_B0, SKL_REVID_B0)) 1112 if (IS_SKL_REVID(dev_priv, SKL_REVID_B0, SKL_REVID_B0))
1092 WA_SET_BIT_MASKED(HIZ_CHICKEN, 1113 WA_SET_BIT_MASKED(HIZ_CHICKEN,
1093 BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE); 1114 BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
1094 1115
1095 /* This is tied to WaForceContextSaveRestoreNonCoherent */
1096 if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
1097 /*
1098 *Use Force Non-Coherent whenever executing a 3D context. This
1099 * is a workaround for a possible hang in the unlikely event
1100 * a TLB invalidation occurs during a PSD flush.
1101 */
1102 /* WaForceEnableNonCoherent:skl */
1103 WA_SET_BIT_MASKED(HDC_CHICKEN0,
1104 HDC_FORCE_NON_COHERENT);
1105
1106 /* WaDisableHDCInvalidation:skl */
1107 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
1108 BDW_DISABLE_HDC_INVALIDATION);
1109 }
1110
1111 /* WaBarrierPerformanceFixDisable:skl */ 1116 /* WaBarrierPerformanceFixDisable:skl */
1112 if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0)) 1117 if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_D0))
1113 WA_SET_BIT_MASKED(HDC_CHICKEN0, 1118 WA_SET_BIT_MASKED(HDC_CHICKEN0,
1114 HDC_FENCE_DEST_SLM_DISABLE | 1119 HDC_FENCE_DEST_SLM_DISABLE |
1115 HDC_BARRIER_PERFORMANCE_DISABLE); 1120 HDC_BARRIER_PERFORMANCE_DISABLE);
1116 1121
1117 /* WaDisableSbeCacheDispatchPortSharing:skl */ 1122 /* WaDisableSbeCacheDispatchPortSharing:skl */
1118 if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) 1123 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
1119 WA_SET_BIT_MASKED( 1124 WA_SET_BIT_MASKED(
1120 GEN7_HALF_SLICE_CHICKEN1, 1125 GEN7_HALF_SLICE_CHICKEN1,
1121 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); 1126 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
1122 1127
1128 /* WaDisableGafsUnitClkGating:skl */
1129 WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
1130
1123 /* WaDisableLSQCROPERFforOCL:skl */ 1131 /* WaDisableLSQCROPERFforOCL:skl */
1124 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4); 1132 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
1125 if (ret) 1133 if (ret)
@@ -1130,9 +1138,8 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
 
 static int bxt_init_workarounds(struct intel_engine_cs *engine)
 {
+	struct drm_i915_private *dev_priv = engine->i915;
 	int ret;
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	ret = gen9_init_workarounds(engine);
 	if (ret)
@@ -1140,11 +1147,11 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
 
 	/* WaStoreMultiplePTEenable:bxt */
 	/* This is a requirement according to Hardware specification */
-	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
 		I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
 
 	/* WaSetClckGatingDisableMedia:bxt */
-	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
 		I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
 					    ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
 	}
@@ -1153,8 +1160,14 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
 	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
 			  STALL_DOP_GATING_DISABLE);
 
+	/* WaDisablePooledEuLoadBalancingFix:bxt */
+	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
+		WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2,
+				  GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
+	}
+
 	/* WaDisableSbeCacheDispatchPortSharing:bxt */
-	if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) {
+	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
 		WA_SET_BIT_MASKED(
 			GEN7_HALF_SLICE_CHICKEN1,
 			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
@@ -1164,7 +1177,7 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
 	/* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
 	/* WaDisableObjectLevelPreemtionForInstanceId:bxt */
 	/* WaDisableLSQCROPERFforOCL:bxt */
-	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
 		ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
 		if (ret)
 			return ret;
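Every stepping-gated workaround above funnels through the same inclusive-range test. A minimal standalone model of the IS_*_REVID() pattern follows, assuming the kernel's REVID_FOREVER convention; the real macros live in i915_drv.h and compare against the PCI revision ID, so the values here are purely illustrative.

#include <stdbool.h>
#include <stdio.h>

#define REVID_FOREVER 0xff	/* upper bound when no fixed stepping exists yet */

static bool revid_in_range(unsigned int revid,
			   unsigned int since, unsigned int until)
{
	/* Inclusive range check, as in IS_SKL_REVID()/IS_BXT_REVID() */
	return revid >= since && revid <= until;
}

int main(void)
{
	unsigned int revid = 0x2;	/* pretend stepping */

	if (revid_in_range(revid, 0, REVID_FOREVER))
		printf("workaround for every stepping applies\n");
	if (!revid_in_range(revid, 0x6, REVID_FOREVER))
		printf("post-fix path not taken on this stepping\n");
	return 0;
}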
@@ -1174,44 +1187,107 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
 		return ret;
 	}
 
+	/* WaProgramL3SqcReg1DefaultForPerf:bxt */
+	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
+		I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
+			   L3_HIGH_PRIO_CREDITS(2));
+
+	/* WaInsertDummyPushConstPs:bxt */
+	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
+		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+	return 0;
+}
+
+static int kbl_init_workarounds(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+	int ret;
+
+	ret = gen9_init_workarounds(engine);
+	if (ret)
+		return ret;
+
+	/* WaEnableGapsTsvCreditFix:kbl */
+	I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
+		   GEN9_GAPS_TSV_CREDIT_DISABLE));
+
+	/* WaDisableDynamicCreditSharing:kbl */
+	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+		WA_SET_BIT(GAMT_CHKN_BIT_REG,
+			   GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
+
+	/* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
+	if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
+		WA_SET_BIT_MASKED(HDC_CHICKEN0,
+				  HDC_FENCE_DEST_SLM_DISABLE);
+
+	/* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
+	 * involving this register should also be added to WA batch as required.
+	 */
+	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
+		/* WaDisableLSQCROPERFforOCL:kbl */
+		I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
+			   GEN8_LQSC_RO_PERF_DIS);
+
+	/* WaInsertDummyPushConstPs:kbl */
+	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+	/* WaDisableGafsUnitClkGating:kbl */
+	WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
+	/* WaDisableSbeCacheDispatchPortSharing:kbl */
+	WA_SET_BIT_MASKED(
+		GEN7_HALF_SLICE_CHICKEN1,
+		GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+
+	/* WaDisableLSQCROPERFforOCL:kbl */
+	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
+	if (ret)
+		return ret;
+
 	return 0;
 }
 
 int init_workarounds_ring(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 
 	WARN_ON(engine->id != RCS);
 
 	dev_priv->workarounds.count = 0;
 	dev_priv->workarounds.hw_whitelist_count[RCS] = 0;
 
-	if (IS_BROADWELL(dev))
+	if (IS_BROADWELL(dev_priv))
 		return bdw_init_workarounds(engine);
 
-	if (IS_CHERRYVIEW(dev))
+	if (IS_CHERRYVIEW(dev_priv))
 		return chv_init_workarounds(engine);
 
-	if (IS_SKYLAKE(dev))
+	if (IS_SKYLAKE(dev_priv))
 		return skl_init_workarounds(engine);
 
-	if (IS_BROXTON(dev))
+	if (IS_BROXTON(dev_priv))
 		return bxt_init_workarounds(engine);
 
+	if (IS_KABYLAKE(dev_priv))
+		return kbl_init_workarounds(engine);
+
 	return 0;
 }
 
 static int init_render_ring(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	int ret = init_ring_common(engine);
 	if (ret)
 		return ret;
 
 	/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
-	if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
+	if (IS_GEN(dev_priv, 4, 6))
 		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
 
 	/* We need to disable the AsyncFlip performance optimisations in order
@@ -1220,22 +1296,22 @@ static int init_render_ring(struct intel_engine_cs *engine)
 	 *
 	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
 	 */
-	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
+	if (IS_GEN(dev_priv, 6, 7))
 		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
 
 	/* Required for the hardware to program scanline values for waiting */
 	/* WaEnableFlushTlbInvalidationMode:snb */
-	if (INTEL_INFO(dev)->gen == 6)
+	if (IS_GEN6(dev_priv))
 		I915_WRITE(GFX_MODE,
 			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
 
 	/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
-	if (IS_GEN7(dev))
+	if (IS_GEN7(dev_priv))
 		I915_WRITE(GFX_MODE_GEN7,
 			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
 			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
 
-	if (IS_GEN6(dev)) {
+	if (IS_GEN6(dev_priv)) {
 		/* From the Sandybridge PRM, volume 1 part 3, page 24:
 		 * "If this bit is set, STCunit will have LRA as replacement
 		 * policy. [...] This bit must be reset. LRA replacement
@@ -1245,19 +1321,18 @@ static int init_render_ring(struct intel_engine_cs *engine)
 			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
 	}
 
-	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
+	if (IS_GEN(dev_priv, 6, 7))
 		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
-	if (HAS_L3_DPF(dev))
-		I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev));
+	if (HAS_L3_DPF(dev_priv))
+		I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev_priv));
 
 	return init_workarounds_ring(engine);
 }
 
 static void render_ring_cleanup(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 
 	if (dev_priv->semaphore_obj) {
 		i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
@@ -1273,13 +1348,12 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
 {
 #define MBOX_UPDATE_DWORDS 8
 	struct intel_engine_cs *signaller = signaller_req->engine;
-	struct drm_device *dev = signaller->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *waiter;
 	enum intel_engine_id id;
 	int ret, num_rings;
 
-	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
 	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
 #undef MBOX_UPDATE_DWORDS
 
@@ -1297,7 +1371,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
 		intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
 		intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
 					   PIPE_CONTROL_QW_WRITE |
-					   PIPE_CONTROL_FLUSH_ENABLE);
+					   PIPE_CONTROL_CS_STALL);
 		intel_ring_emit(signaller, lower_32_bits(gtt_offset));
 		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
 		intel_ring_emit(signaller, seqno);
@@ -1315,13 +1389,12 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 {
 #define MBOX_UPDATE_DWORDS 6
 	struct intel_engine_cs *signaller = signaller_req->engine;
-	struct drm_device *dev = signaller->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *waiter;
 	enum intel_engine_id id;
 	int ret, num_rings;
 
-	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
 	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
 #undef MBOX_UPDATE_DWORDS
 
@@ -1354,14 +1427,13 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 		       unsigned int num_dwords)
 {
 	struct intel_engine_cs *signaller = signaller_req->engine;
-	struct drm_device *dev = signaller->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *useless;
 	enum intel_engine_id id;
 	int ret, num_rings;
 
 #define MBOX_UPDATE_DWORDS 3
-	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
 	num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
 #undef MBOX_UPDATE_DWORDS
 
@@ -1420,10 +1492,38 @@ gen6_add_request(struct drm_i915_gem_request *req)
 	return 0;
 }
 
-static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
+static int
+gen8_render_add_request(struct drm_i915_gem_request *req)
+{
+	struct intel_engine_cs *engine = req->engine;
+	int ret;
+
+	if (engine->semaphore.signal)
+		ret = engine->semaphore.signal(req, 8);
+	else
+		ret = intel_ring_begin(req, 8);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6));
+	intel_ring_emit(engine, (PIPE_CONTROL_GLOBAL_GTT_IVB |
+				 PIPE_CONTROL_CS_STALL |
+				 PIPE_CONTROL_QW_WRITE));
+	intel_ring_emit(engine, intel_hws_seqno_address(req->engine));
+	intel_ring_emit(engine, 0);
+	intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+	/* We're thrashing one dword of HWS. */
+	intel_ring_emit(engine, 0);
+	intel_ring_emit(engine, MI_USER_INTERRUPT);
+	intel_ring_emit(engine, MI_NOOP);
+	__intel_ring_advance(engine);
+
+	return 0;
+}
+
+static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv,
 					      u32 seqno)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	return dev_priv->last_seqno < seqno;
 }
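The new gen8_render_add_request() asks for exactly eight dwords. The sketch below tallies that budget under the assumption that GFX_OP_PIPE_CONTROL(6) encodes a six-dword packet (header, flags, two address dwords, the seqno, and the extra dword thrashed by the 64-bit QW write); it is a model of the accounting, not driver code.

#include <assert.h>

int main(void)
{
	int dwords = 0;

	dwords += 6;	/* PIPE_CONTROL: header, flags, addr lo/hi, seqno,
			 * plus the one HWS dword thrashed by the QW write */
	dwords += 1;	/* MI_USER_INTERRUPT */
	dwords += 1;	/* MI_NOOP pad */
	assert(dwords == 8);	/* matches intel_ring_begin(req, 8) and
				 * engine->semaphore.signal(req, 8) */
	return 0;
}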
1429 1529
@@ -1441,7 +1541,8 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
1441 u32 seqno) 1541 u32 seqno)
1442{ 1542{
1443 struct intel_engine_cs *waiter = waiter_req->engine; 1543 struct intel_engine_cs *waiter = waiter_req->engine;
1444 struct drm_i915_private *dev_priv = waiter->dev->dev_private; 1544 struct drm_i915_private *dev_priv = waiter_req->i915;
1545 struct i915_hw_ppgtt *ppgtt;
1445 int ret; 1546 int ret;
1446 1547
1447 ret = intel_ring_begin(waiter_req, 4); 1548 ret = intel_ring_begin(waiter_req, 4);
@@ -1450,7 +1551,6 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
1450 1551
1451 intel_ring_emit(waiter, MI_SEMAPHORE_WAIT | 1552 intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
1452 MI_SEMAPHORE_GLOBAL_GTT | 1553 MI_SEMAPHORE_GLOBAL_GTT |
1453 MI_SEMAPHORE_POLL |
1454 MI_SEMAPHORE_SAD_GTE_SDD); 1554 MI_SEMAPHORE_SAD_GTE_SDD);
1455 intel_ring_emit(waiter, seqno); 1555 intel_ring_emit(waiter, seqno);
1456 intel_ring_emit(waiter, 1556 intel_ring_emit(waiter,
@@ -1458,6 +1558,15 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
1458 intel_ring_emit(waiter, 1558 intel_ring_emit(waiter,
1459 upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id))); 1559 upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
1460 intel_ring_advance(waiter); 1560 intel_ring_advance(waiter);
1561
1562 /* When the !RCS engines idle waiting upon a semaphore, they lose their
1563 * pagetables and we must reload them before executing the batch.
1564 * We do this on the i915_switch_context() following the wait and
1565 * before the dispatch.
1566 */
1567 ppgtt = waiter_req->ctx->ppgtt;
1568 if (ppgtt && waiter_req->engine->id != RCS)
1569 ppgtt->pd_dirty_rings |= intel_engine_flag(waiter_req->engine);
1461 return 0; 1570 return 0;
1462} 1571}
1463 1572
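The pd_dirty_rings update above is a per-engine bitmask. A simplified userspace model of the marking step follows; the engine ids and the flag helper are assumptions mirroring intel_engine_flag(), not the driver's definitions.

#include <stdint.h>
#include <stdio.h>

enum engine_id { RCS, VCS, BCS, VECS, VCS2 };	/* illustrative ids */

struct ppgtt_model {
	uint32_t pd_dirty_rings;	/* one bit per engine */
};

/* Mirrors intel_engine_flag(): each engine owns bit (1 << id). */
static uint32_t engine_flag(enum engine_id id)
{
	return UINT32_C(1) << id;
}

int main(void)
{
	struct ppgtt_model ppgtt = { 0 };
	enum engine_id waiter = VCS;

	/* Non-render engines lose their page directories while idling on a
	 * semaphore; flag them so the next context switch reloads the PDs. */
	if (waiter != RCS)
		ppgtt.pd_dirty_rings |= engine_flag(waiter);
	printf("dirty mask: 0x%x\n", ppgtt.pd_dirty_rings);
	return 0;
}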
@@ -1486,7 +1595,7 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
 		return ret;
 
 	/* If seqno wrap happened, omit the wait with no-ops */
-	if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
+	if (likely(!i915_gem_has_seqno_wrapped(waiter_req->i915, seqno))) {
 		intel_ring_emit(waiter, dw1 | wait_mbox);
 		intel_ring_emit(waiter, seqno);
 		intel_ring_emit(waiter, 0);
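The wrap test reads oddly until you note that seqnos are handed out monotonically: a pre-reset seqno compares as larger than anything issued since the reset, so the semaphore wait would never complete and is replaced with no-ops. A small model of i915_gem_has_seqno_wrapped(), with illustrative values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool has_seqno_wrapped(uint32_t last_seqno, uint32_t seqno)
{
	/* A value above the most recently assigned seqno can only have
	 * been issued before the counter wrapped. */
	return last_seqno < seqno;
}

int main(void)
{
	printf("%d\n", has_seqno_wrapped(100, 0xfffffff0));	/* 1: no-op the wait */
	printf("%d\n", has_seqno_wrapped(100, 42));		/* 0: emit real wait */
	return 0;
}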
@@ -1567,7 +1676,7 @@ pc_render_add_request(struct drm_i915_gem_request *req)
 static void
 gen6_seqno_barrier(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 
 	/* Workaround to force correct ordering between irq and seqno writes on
 	 * ivb (and maybe also on snb) by reading from a CS register (like
@@ -1616,8 +1725,7 @@ pc_render_set_seqno(struct intel_engine_cs *engine, u32 seqno)
 static bool
 gen5_ring_get_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	unsigned long flags;
 
 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1634,8 +1742,7 @@ gen5_ring_get_irq(struct intel_engine_cs *engine)
 static void
 gen5_ring_put_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1647,8 +1754,7 @@ gen5_ring_put_irq(struct intel_engine_cs *engine)
 static bool
 i9xx_ring_get_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	unsigned long flags;
 
 	if (!intel_irqs_enabled(dev_priv))
@@ -1668,8 +1774,7 @@ i9xx_ring_get_irq(struct intel_engine_cs *engine)
 static void
 i9xx_ring_put_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1684,8 +1789,7 @@ i9xx_ring_put_irq(struct intel_engine_cs *engine)
 static bool
 i8xx_ring_get_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	unsigned long flags;
 
 	if (!intel_irqs_enabled(dev_priv))
@@ -1705,8 +1809,7 @@ i8xx_ring_get_irq(struct intel_engine_cs *engine)
 static void
 i8xx_ring_put_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1759,8 +1862,7 @@ i9xx_add_request(struct drm_i915_gem_request *req)
 static bool
 gen6_ring_get_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	unsigned long flags;
 
 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1768,10 +1870,10 @@ gen6_ring_get_irq(struct intel_engine_cs *engine)
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (engine->irq_refcount++ == 0) {
-		if (HAS_L3_DPF(dev) && engine->id == RCS)
+		if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
 			I915_WRITE_IMR(engine,
 				       ~(engine->irq_enable_mask |
-					 GT_PARITY_ERROR(dev)));
+					 GT_PARITY_ERROR(dev_priv)));
 		else
 			I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
 		gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
@@ -1784,14 +1886,13 @@ gen6_ring_get_irq(struct intel_engine_cs *engine)
 static void
 gen6_ring_put_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--engine->irq_refcount == 0) {
-		if (HAS_L3_DPF(dev) && engine->id == RCS)
-			I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev));
+		if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
+			I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev_priv));
 		else
 			I915_WRITE_IMR(engine, ~0);
 		gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
@@ -1802,8 +1903,7 @@ gen6_ring_put_irq(struct intel_engine_cs *engine)
 static bool
 hsw_vebox_get_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	unsigned long flags;
 
 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1822,8 +1922,7 @@ hsw_vebox_get_irq(struct intel_engine_cs *engine)
 static void
 hsw_vebox_put_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1837,8 +1936,7 @@ hsw_vebox_put_irq(struct intel_engine_cs *engine)
 static bool
 gen8_ring_get_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	unsigned long flags;
 
 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1846,7 +1944,7 @@ gen8_ring_get_irq(struct intel_engine_cs *engine)
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (engine->irq_refcount++ == 0) {
-		if (HAS_L3_DPF(dev) && engine->id == RCS) {
+		if (HAS_L3_DPF(dev_priv) && engine->id == RCS) {
 			I915_WRITE_IMR(engine,
 				       ~(engine->irq_enable_mask |
 					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
@@ -1863,13 +1961,12 @@ gen8_ring_get_irq(struct intel_engine_cs *engine)
 static void
 gen8_ring_put_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--engine->irq_refcount == 0) {
-		if (HAS_L3_DPF(dev) && engine->id == RCS) {
+		if (HAS_L3_DPF(dev_priv) && engine->id == RCS) {
 			I915_WRITE_IMR(engine,
 				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
 		} else {
@@ -1991,12 +2088,12 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
 
 static void cleanup_phys_status_page(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = to_i915(engine->dev);
+	struct drm_i915_private *dev_priv = engine->i915;
 
 	if (!dev_priv->status_page_dmah)
 		return;
 
-	drm_pci_free(engine->dev, dev_priv->status_page_dmah);
+	drm_pci_free(dev_priv->dev, dev_priv->status_page_dmah);
 	engine->status_page.page_addr = NULL;
 }
 
@@ -2022,10 +2119,10 @@ static int init_status_page(struct intel_engine_cs *engine)
 	unsigned flags;
 	int ret;
 
-	obj = i915_gem_alloc_object(engine->dev, 4096);
-	if (obj == NULL) {
+	obj = i915_gem_object_create(engine->i915->dev, 4096);
+	if (IS_ERR(obj)) {
 		DRM_ERROR("Failed to allocate status page\n");
-		return -ENOMEM;
+		return PTR_ERR(obj);
 	}
 
 	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
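The NULL-versus-ENOMEM churn above is the switch to the kernel's ERR_PTR convention: the errno travels inside the pointer itself, so the caller tests IS_ERR() and recovers the code with PTR_ERR(). A self-contained model of the <linux/err.h> helpers, with illustrative constants:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095	/* errnos map into the top page of the address space */

static void *ERR_PTR(long error)
{
	return (void *)error;
}

static long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static char fake_object[64];	/* stand-in for a real allocation */

static void *object_create(int simulate_oom)
{
	if (simulate_oom)
		return ERR_PTR(-ENOMEM);	/* errno rides in the pointer */
	return fake_object;
}

int main(void)
{
	void *obj = object_create(1);

	if (IS_ERR(obj)) {	/* replaces the old obj == NULL test */
		fprintf(stderr, "create failed: %ld\n", PTR_ERR(obj));
		return 1;
	}
	return 0;
}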
@@ -2033,7 +2130,7 @@ static int init_status_page(struct intel_engine_cs *engine)
 		goto err_unref;
 
 	flags = 0;
-	if (!HAS_LLC(engine->dev))
+	if (!HAS_LLC(engine->i915))
 		/* On g33, we cannot place HWS above 256MiB, so
 		 * restrict its pinning to the low mappable arena.
 		 * Though this restriction is not documented for
@@ -2067,11 +2164,11 @@ err_unref:
 
 static int init_phys_status_page(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 
 	if (!dev_priv->status_page_dmah) {
 		dev_priv->status_page_dmah =
-			drm_pci_alloc(engine->dev, PAGE_SIZE, PAGE_SIZE);
+			drm_pci_alloc(dev_priv->dev, PAGE_SIZE, PAGE_SIZE);
 		if (!dev_priv->status_page_dmah)
 			return -ENOMEM;
 	}
@@ -2084,20 +2181,22 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
 
 void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 {
+	GEM_BUG_ON(ringbuf->vma == NULL);
+	GEM_BUG_ON(ringbuf->virtual_start == NULL);
+
 	if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
 		i915_gem_object_unpin_map(ringbuf->obj);
 	else
-		iounmap(ringbuf->virtual_start);
+		i915_vma_unpin_iomap(ringbuf->vma);
 	ringbuf->virtual_start = NULL;
-	ringbuf->vma = NULL;
+
 	i915_gem_object_ggtt_unpin(ringbuf->obj);
+	ringbuf->vma = NULL;
 }
 
-int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
+int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
 				     struct intel_ringbuffer *ringbuf)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct drm_i915_gem_object *obj = ringbuf->obj;
 	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
 	unsigned flags = PIN_OFFSET_BIAS | 4096;
@@ -2131,10 +2230,9 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
 		/* Access through the GTT requires the device to be awake. */
 		assert_rpm_wakelock_held(dev_priv);
 
-		addr = ioremap_wc(ggtt->mappable_base +
-				  i915_gem_obj_ggtt_offset(obj), ringbuf->size);
-		if (addr == NULL) {
-			ret = -ENOMEM;
+		addr = i915_vma_pin_iomap(i915_gem_obj_to_ggtt(obj));
+		if (IS_ERR(addr)) {
+			ret = PTR_ERR(addr);
 			goto err_unpin;
 		}
 	}
@@ -2163,9 +2261,9 @@ static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
 	if (!HAS_LLC(dev))
 		obj = i915_gem_object_create_stolen(dev, ringbuf->size);
 	if (obj == NULL)
-		obj = i915_gem_alloc_object(dev, ringbuf->size);
-	if (obj == NULL)
-		return -ENOMEM;
+		obj = i915_gem_object_create(dev, ringbuf->size);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
 
 	/* mark ring buffers as read-only from GPU side by default */
 	obj->gt_ro = 1;
@@ -2197,13 +2295,13 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
 	 * of the buffer.
 	 */
 	ring->effective_size = size;
-	if (IS_I830(engine->dev) || IS_845G(engine->dev))
+	if (IS_I830(engine->i915) || IS_845G(engine->i915))
 		ring->effective_size -= 2 * CACHELINE_BYTES;
 
 	ring->last_retired_head = -1;
 	intel_ring_update_space(ring);
 
-	ret = intel_alloc_ringbuffer_obj(engine->dev, ring);
+	ret = intel_alloc_ringbuffer_obj(engine->i915->dev, ring);
 	if (ret) {
 		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n",
 				 engine->name, ret);
@@ -2226,12 +2324,13 @@ intel_ringbuffer_free(struct intel_ringbuffer *ring)
 static int intel_init_ring_buffer(struct drm_device *dev,
 				  struct intel_engine_cs *engine)
 {
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_ringbuffer *ringbuf;
 	int ret;
 
 	WARN_ON(engine->buffer);
 
-	engine->dev = dev;
+	engine->i915 = dev_priv;
 	INIT_LIST_HEAD(&engine->active_list);
 	INIT_LIST_HEAD(&engine->request_list);
 	INIT_LIST_HEAD(&engine->execlist_queue);
@@ -2249,7 +2348,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	}
 	engine->buffer = ringbuf;
 
-	if (I915_NEED_GFX_HWS(dev)) {
+	if (I915_NEED_GFX_HWS(dev_priv)) {
 		ret = init_status_page(engine);
 		if (ret)
 			goto error;
@@ -2260,7 +2359,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 			goto error;
 	}
 
-	ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
+	ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf);
 	if (ret) {
 		DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
 			  engine->name, ret);
@@ -2286,11 +2385,11 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
 	if (!intel_engine_initialized(engine))
 		return;
 
-	dev_priv = to_i915(engine->dev);
+	dev_priv = engine->i915;
 
 	if (engine->buffer) {
 		intel_stop_engine(engine);
-		WARN_ON(!IS_GEN2(engine->dev) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
+		WARN_ON(!IS_GEN2(dev_priv) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
 
 		intel_unpin_ringbuffer_obj(engine->buffer);
 		intel_ringbuffer_free(engine->buffer);
@@ -2300,7 +2399,7 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
 	if (engine->cleanup)
 		engine->cleanup(engine);
 
-	if (I915_NEED_GFX_HWS(engine->dev)) {
+	if (I915_NEED_GFX_HWS(dev_priv)) {
 		cleanup_status_page(engine);
 	} else {
 		WARN_ON(engine->id != RCS);
@@ -2309,7 +2408,7 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
 
 	i915_cmd_parser_fini_ring(engine);
 	i915_gem_batch_pool_fini(&engine->batch_pool);
-	engine->dev = NULL;
+	engine->i915 = NULL;
 }
 
 int intel_engine_idle(struct intel_engine_cs *engine)
@@ -2332,46 +2431,22 @@ int intel_engine_idle(struct intel_engine_cs *engine)
 
 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 {
-	request->ringbuf = request->engine->buffer;
-	return 0;
-}
+	int ret;
 
-int intel_ring_reserve_space(struct drm_i915_gem_request *request)
-{
-	/*
-	 * The first call merely notes the reserve request and is common for
-	 * all back ends. The subsequent localised _begin() call actually
-	 * ensures that the reservation is available. Without the begin, if
-	 * the request creator immediately submitted the request without
-	 * adding any commands to it then there might not actually be
-	 * sufficient room for the submission commands.
+	/* Flush enough space to reduce the likelihood of waiting after
+	 * we start building the request - in which case we will just
+	 * have to repeat work.
 	 */
-	intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
-
-	return intel_ring_begin(request, 0);
-}
-
-void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size)
-{
-	GEM_BUG_ON(ringbuf->reserved_size);
-	ringbuf->reserved_size = size;
-}
-
-void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf)
-{
-	GEM_BUG_ON(!ringbuf->reserved_size);
-	ringbuf->reserved_size = 0;
-}
+	request->reserved_space += LEGACY_REQUEST_SIZE;
 
-void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf)
-{
-	GEM_BUG_ON(!ringbuf->reserved_size);
-	ringbuf->reserved_size = 0;
-}
+	request->ringbuf = request->engine->buffer;
 
-void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
-{
-	GEM_BUG_ON(ringbuf->reserved_size);
-}
+	ret = intel_ring_begin(request, 0);
+	if (ret)
+		return ret;
+
+	request->reserved_space -= LEGACY_REQUEST_SIZE;
+	return 0;
+}
 
 static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
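The rewritten intel_ring_alloc_request_extras() replaces the ringbuffer-global reserved_size helpers with a per-request reservation: over-reserve up front, prove the space exists via the begin() wait, then hand the slack back. A compilable sketch of that pattern, assuming LEGACY_REQUEST_SIZE is simply a byte budget large enough for the request-emission commands:

#include <stdio.h>

#define LEGACY_REQUEST_SIZE 200	/* assumed byte budget, per the patch */

struct request_model {
	int reserved_space;
};

/* Stand-in for intel_ring_begin(): succeeds once 'bytes' plus the
 * caller's reserved_space would fit in the ring. */
static int ring_begin(struct request_model *req, int bytes)
{
	(void)req;
	(void)bytes;
	return 0;
}

static int alloc_request_extras(struct request_model *req)
{
	int ret;

	/* Over-reserve so later emission rarely has to wait mid-request... */
	req->reserved_space += LEGACY_REQUEST_SIZE;
	ret = ring_begin(req, 0);
	if (ret)
		return ret;
	/* ...then return the slack once the space is guaranteed. */
	req->reserved_space -= LEGACY_REQUEST_SIZE;
	return 0;
}

int main(void)
{
	struct request_model req = { 0 };

	printf("ret=%d reserved=%d\n", alloc_request_extras(&req),
	       req.reserved_space);
	return 0;
}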
@@ -2393,7 +2468,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
2393 * 2468 *
2394 * See also i915_gem_request_alloc() and i915_add_request(). 2469 * See also i915_gem_request_alloc() and i915_add_request().
2395 */ 2470 */
2396 GEM_BUG_ON(!ringbuf->reserved_size); 2471 GEM_BUG_ON(!req->reserved_space);
2397 2472
2398 list_for_each_entry(target, &engine->request_list, list) { 2473 list_for_each_entry(target, &engine->request_list, list) {
2399 unsigned space; 2474 unsigned space;
@@ -2428,7 +2503,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
2428 int total_bytes, wait_bytes; 2503 int total_bytes, wait_bytes;
2429 bool need_wrap = false; 2504 bool need_wrap = false;
2430 2505
2431 total_bytes = bytes + ringbuf->reserved_size; 2506 total_bytes = bytes + req->reserved_space;
2432 2507
2433 if (unlikely(bytes > remain_usable)) { 2508 if (unlikely(bytes > remain_usable)) {
2434 /* 2509 /*
@@ -2444,7 +2519,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
2444 * and only need to effectively wait for the reserved 2519 * and only need to effectively wait for the reserved
2445 * size space from the start of ringbuffer. 2520 * size space from the start of ringbuffer.
2446 */ 2521 */
2447 wait_bytes = remain_actual + ringbuf->reserved_size; 2522 wait_bytes = remain_actual + req->reserved_space;
2448 } else { 2523 } else {
2449 /* No wrapping required, just waiting. */ 2524 /* No wrapping required, just waiting. */
2450 wait_bytes = total_bytes; 2525 wait_bytes = total_bytes;
@@ -2501,7 +2576,7 @@ int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
2501 2576
2502void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno) 2577void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
2503{ 2578{
2504 struct drm_i915_private *dev_priv = to_i915(engine->dev); 2579 struct drm_i915_private *dev_priv = engine->i915;
2505 2580
2506 /* Our semaphore implementation is strictly monotonic (i.e. we proceed 2581 /* Our semaphore implementation is strictly monotonic (i.e. we proceed
2507 * so long as the semaphore value in the register/page is greater 2582 * so long as the semaphore value in the register/page is greater
@@ -2511,7 +2586,7 @@ void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
2511 * the semaphore value, then when the seqno moves backwards all 2586 * the semaphore value, then when the seqno moves backwards all
2512 * future waits will complete instantly (causing rendering corruption). 2587 * future waits will complete instantly (causing rendering corruption).
2513 */ 2588 */
2514 if (INTEL_INFO(dev_priv)->gen == 6 || INTEL_INFO(dev_priv)->gen == 7) { 2589 if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
2515 I915_WRITE(RING_SYNC_0(engine->mmio_base), 0); 2590 I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
2516 I915_WRITE(RING_SYNC_1(engine->mmio_base), 0); 2591 I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
2517 if (HAS_VEBOX(dev_priv)) 2592 if (HAS_VEBOX(dev_priv))
@@ -2537,7 +2612,7 @@ void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
2537static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine, 2612static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
2538 u32 value) 2613 u32 value)
2539{ 2614{
2540 struct drm_i915_private *dev_priv = engine->dev->dev_private; 2615 struct drm_i915_private *dev_priv = engine->i915;
2541 2616
2542 /* Every tail move must follow the sequence below */ 2617 /* Every tail move must follow the sequence below */
2543 2618
@@ -2579,7 +2654,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
2579 return ret; 2654 return ret;
2580 2655
2581 cmd = MI_FLUSH_DW; 2656 cmd = MI_FLUSH_DW;
2582 if (INTEL_INFO(engine->dev)->gen >= 8) 2657 if (INTEL_GEN(req->i915) >= 8)
2583 cmd += 1; 2658 cmd += 1;
2584 2659
2585 /* We always require a command barrier so that subsequent 2660 /* We always require a command barrier so that subsequent
@@ -2601,7 +2676,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
2601 intel_ring_emit(engine, cmd); 2676 intel_ring_emit(engine, cmd);
2602 intel_ring_emit(engine, 2677 intel_ring_emit(engine,
2603 I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); 2678 I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
2604 if (INTEL_INFO(engine->dev)->gen >= 8) { 2679 if (INTEL_GEN(req->i915) >= 8) {
2605 intel_ring_emit(engine, 0); /* upper addr */ 2680 intel_ring_emit(engine, 0); /* upper addr */
2606 intel_ring_emit(engine, 0); /* value */ 2681 intel_ring_emit(engine, 0); /* value */
2607 } else { 2682 } else {
@@ -2692,7 +2767,6 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
2692 u32 invalidate, u32 flush) 2767 u32 invalidate, u32 flush)
2693{ 2768{
2694 struct intel_engine_cs *engine = req->engine; 2769 struct intel_engine_cs *engine = req->engine;
2695 struct drm_device *dev = engine->dev;
2696 uint32_t cmd; 2770 uint32_t cmd;
2697 int ret; 2771 int ret;
2698 2772
@@ -2701,7 +2775,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
2701 return ret; 2775 return ret;
2702 2776
2703 cmd = MI_FLUSH_DW; 2777 cmd = MI_FLUSH_DW;
2704 if (INTEL_INFO(dev)->gen >= 8) 2778 if (INTEL_GEN(req->i915) >= 8)
2705 cmd += 1; 2779 cmd += 1;
2706 2780
2707 /* We always require a command barrier so that subsequent 2781 /* We always require a command barrier so that subsequent
@@ -2722,7 +2796,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
2722 intel_ring_emit(engine, cmd); 2796 intel_ring_emit(engine, cmd);
2723 intel_ring_emit(engine, 2797 intel_ring_emit(engine,
2724 I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); 2798 I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
2725 if (INTEL_INFO(dev)->gen >= 8) { 2799 if (INTEL_GEN(req->i915) >= 8) {
2726 intel_ring_emit(engine, 0); /* upper addr */ 2800 intel_ring_emit(engine, 0); /* upper addr */
2727 intel_ring_emit(engine, 0); /* value */ 2801 intel_ring_emit(engine, 0); /* value */
2728 } else { 2802 } else {
@@ -2747,10 +2821,10 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
2747 engine->hw_id = 0; 2821 engine->hw_id = 0;
2748 engine->mmio_base = RENDER_RING_BASE; 2822 engine->mmio_base = RENDER_RING_BASE;
2749 2823
2750 if (INTEL_INFO(dev)->gen >= 8) { 2824 if (INTEL_GEN(dev_priv) >= 8) {
2751 if (i915_semaphore_is_enabled(dev)) { 2825 if (i915_semaphore_is_enabled(dev_priv)) {
2752 obj = i915_gem_alloc_object(dev, 4096); 2826 obj = i915_gem_object_create(dev, 4096);
2753 if (obj == NULL) { 2827 if (IS_ERR(obj)) {
2754 DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n"); 2828 DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
2755 i915.semaphores = 0; 2829 i915.semaphores = 0;
2756 } else { 2830 } else {
@@ -2766,25 +2840,24 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
2766 } 2840 }
2767 2841
2768 engine->init_context = intel_rcs_ctx_init; 2842 engine->init_context = intel_rcs_ctx_init;
2769 engine->add_request = gen6_add_request; 2843 engine->add_request = gen8_render_add_request;
2770 engine->flush = gen8_render_ring_flush; 2844 engine->flush = gen8_render_ring_flush;
2771 engine->irq_get = gen8_ring_get_irq; 2845 engine->irq_get = gen8_ring_get_irq;
2772 engine->irq_put = gen8_ring_put_irq; 2846 engine->irq_put = gen8_ring_put_irq;
2773 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT; 2847 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
2774 engine->irq_seqno_barrier = gen6_seqno_barrier;
2775 engine->get_seqno = ring_get_seqno; 2848 engine->get_seqno = ring_get_seqno;
2776 engine->set_seqno = ring_set_seqno; 2849 engine->set_seqno = ring_set_seqno;
2777 if (i915_semaphore_is_enabled(dev)) { 2850 if (i915_semaphore_is_enabled(dev_priv)) {
2778 WARN_ON(!dev_priv->semaphore_obj); 2851 WARN_ON(!dev_priv->semaphore_obj);
2779 engine->semaphore.sync_to = gen8_ring_sync; 2852 engine->semaphore.sync_to = gen8_ring_sync;
2780 engine->semaphore.signal = gen8_rcs_signal; 2853 engine->semaphore.signal = gen8_rcs_signal;
2781 GEN8_RING_SEMAPHORE_INIT(engine); 2854 GEN8_RING_SEMAPHORE_INIT(engine);
2782 } 2855 }
2783 } else if (INTEL_INFO(dev)->gen >= 6) { 2856 } else if (INTEL_GEN(dev_priv) >= 6) {
2784 engine->init_context = intel_rcs_ctx_init; 2857 engine->init_context = intel_rcs_ctx_init;
2785 engine->add_request = gen6_add_request; 2858 engine->add_request = gen6_add_request;
2786 engine->flush = gen7_render_ring_flush; 2859 engine->flush = gen7_render_ring_flush;
2787 if (INTEL_INFO(dev)->gen == 6) 2860 if (IS_GEN6(dev_priv))
2788 engine->flush = gen6_render_ring_flush; 2861 engine->flush = gen6_render_ring_flush;
2789 engine->irq_get = gen6_ring_get_irq; 2862 engine->irq_get = gen6_ring_get_irq;
2790 engine->irq_put = gen6_ring_put_irq; 2863 engine->irq_put = gen6_ring_put_irq;
@@ -2792,7 +2865,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
2792 engine->irq_seqno_barrier = gen6_seqno_barrier; 2865 engine->irq_seqno_barrier = gen6_seqno_barrier;
2793 engine->get_seqno = ring_get_seqno; 2866 engine->get_seqno = ring_get_seqno;
2794 engine->set_seqno = ring_set_seqno; 2867 engine->set_seqno = ring_set_seqno;
2795 if (i915_semaphore_is_enabled(dev)) { 2868 if (i915_semaphore_is_enabled(dev_priv)) {
2796 engine->semaphore.sync_to = gen6_ring_sync; 2869 engine->semaphore.sync_to = gen6_ring_sync;
2797 engine->semaphore.signal = gen6_signal; 2870 engine->semaphore.signal = gen6_signal;
2798 /* 2871 /*
@@ -2813,7 +2886,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
2813 engine->semaphore.mbox.signal[VECS] = GEN6_VERSYNC; 2886 engine->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
2814 engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; 2887 engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2815 } 2888 }
2816 } else if (IS_GEN5(dev)) { 2889 } else if (IS_GEN5(dev_priv)) {
2817 engine->add_request = pc_render_add_request; 2890 engine->add_request = pc_render_add_request;
2818 engine->flush = gen4_render_ring_flush; 2891 engine->flush = gen4_render_ring_flush;
2819 engine->get_seqno = pc_render_get_seqno; 2892 engine->get_seqno = pc_render_get_seqno;
@@ -2824,13 +2897,13 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
2824 GT_RENDER_PIPECTL_NOTIFY_INTERRUPT; 2897 GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
2825 } else { 2898 } else {
2826 engine->add_request = i9xx_add_request; 2899 engine->add_request = i9xx_add_request;
2827 if (INTEL_INFO(dev)->gen < 4) 2900 if (INTEL_GEN(dev_priv) < 4)
2828 engine->flush = gen2_render_ring_flush; 2901 engine->flush = gen2_render_ring_flush;
2829 else 2902 else
2830 engine->flush = gen4_render_ring_flush; 2903 engine->flush = gen4_render_ring_flush;
2831 engine->get_seqno = ring_get_seqno; 2904 engine->get_seqno = ring_get_seqno;
2832 engine->set_seqno = ring_set_seqno; 2905 engine->set_seqno = ring_set_seqno;
2833 if (IS_GEN2(dev)) { 2906 if (IS_GEN2(dev_priv)) {
2834 engine->irq_get = i8xx_ring_get_irq; 2907 engine->irq_get = i8xx_ring_get_irq;
2835 engine->irq_put = i8xx_ring_put_irq; 2908 engine->irq_put = i8xx_ring_put_irq;
2836 } else { 2909 } else {
@@ -2841,15 +2914,15 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
2841 } 2914 }
2842 engine->write_tail = ring_write_tail; 2915 engine->write_tail = ring_write_tail;
2843 2916
2844 if (IS_HASWELL(dev)) 2917 if (IS_HASWELL(dev_priv))
2845 engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer; 2918 engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
2846 else if (IS_GEN8(dev)) 2919 else if (IS_GEN8(dev_priv))
2847 engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; 2920 engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
2848 else if (INTEL_INFO(dev)->gen >= 6) 2921 else if (INTEL_GEN(dev_priv) >= 6)
2849 engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 2922 engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2850 else if (INTEL_INFO(dev)->gen >= 4) 2923 else if (INTEL_GEN(dev_priv) >= 4)
2851 engine->dispatch_execbuffer = i965_dispatch_execbuffer; 2924 engine->dispatch_execbuffer = i965_dispatch_execbuffer;
2852 else if (IS_I830(dev) || IS_845G(dev)) 2925 else if (IS_I830(dev_priv) || IS_845G(dev_priv))
2853 engine->dispatch_execbuffer = i830_dispatch_execbuffer; 2926 engine->dispatch_execbuffer = i830_dispatch_execbuffer;
2854 else 2927 else
2855 engine->dispatch_execbuffer = i915_dispatch_execbuffer; 2928 engine->dispatch_execbuffer = i915_dispatch_execbuffer;
@@ -2857,11 +2930,11 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
2857 engine->cleanup = render_ring_cleanup; 2930 engine->cleanup = render_ring_cleanup;
2858 2931
2859 /* Workaround batchbuffer to combat CS tlb bug. */ 2932 /* Workaround batchbuffer to combat CS tlb bug. */
2860 if (HAS_BROKEN_CS_TLB(dev)) { 2933 if (HAS_BROKEN_CS_TLB(dev_priv)) {
2861 obj = i915_gem_alloc_object(dev, I830_WA_SIZE); 2934 obj = i915_gem_object_create(dev, I830_WA_SIZE);
2862 if (obj == NULL) { 2935 if (IS_ERR(obj)) {
2863 DRM_ERROR("Failed to allocate batch bo\n"); 2936 DRM_ERROR("Failed to allocate batch bo\n");
2864 return -ENOMEM; 2937 return PTR_ERR(obj);
2865 } 2938 }
2866 2939
2867 ret = i915_gem_obj_ggtt_pin(obj, 0, 0); 2940 ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
@@ -2879,7 +2952,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
2879 if (ret) 2952 if (ret)
2880 return ret; 2953 return ret;
2881 2954
2882 if (INTEL_INFO(dev)->gen >= 5) { 2955 if (INTEL_GEN(dev_priv) >= 5) {
2883 ret = intel_init_pipe_control(engine); 2956 ret = intel_init_pipe_control(engine);
2884 if (ret) 2957 if (ret)
2885 return ret; 2958 return ret;
@@ -2899,24 +2972,24 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
2899 engine->hw_id = 1; 2972 engine->hw_id = 1;
2900 2973
2901 engine->write_tail = ring_write_tail; 2974 engine->write_tail = ring_write_tail;
2902 if (INTEL_INFO(dev)->gen >= 6) { 2975 if (INTEL_GEN(dev_priv) >= 6) {
2903 engine->mmio_base = GEN6_BSD_RING_BASE; 2976 engine->mmio_base = GEN6_BSD_RING_BASE;
2904 /* gen6 bsd needs a special wa for tail updates */ 2977 /* gen6 bsd needs a special wa for tail updates */
2905 if (IS_GEN6(dev)) 2978 if (IS_GEN6(dev_priv))
2906 engine->write_tail = gen6_bsd_ring_write_tail; 2979 engine->write_tail = gen6_bsd_ring_write_tail;
2907 engine->flush = gen6_bsd_ring_flush; 2980 engine->flush = gen6_bsd_ring_flush;
2908 engine->add_request = gen6_add_request; 2981 engine->add_request = gen6_add_request;
2909 engine->irq_seqno_barrier = gen6_seqno_barrier; 2982 engine->irq_seqno_barrier = gen6_seqno_barrier;
2910 engine->get_seqno = ring_get_seqno; 2983 engine->get_seqno = ring_get_seqno;
2911 engine->set_seqno = ring_set_seqno; 2984 engine->set_seqno = ring_set_seqno;
2912 if (INTEL_INFO(dev)->gen >= 8) { 2985 if (INTEL_GEN(dev_priv) >= 8) {
2913 engine->irq_enable_mask = 2986 engine->irq_enable_mask =
2914 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT; 2987 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
2915 engine->irq_get = gen8_ring_get_irq; 2988 engine->irq_get = gen8_ring_get_irq;
2916 engine->irq_put = gen8_ring_put_irq; 2989 engine->irq_put = gen8_ring_put_irq;
2917 engine->dispatch_execbuffer = 2990 engine->dispatch_execbuffer =
2918 gen8_ring_dispatch_execbuffer; 2991 gen8_ring_dispatch_execbuffer;
2919 if (i915_semaphore_is_enabled(dev)) { 2992 if (i915_semaphore_is_enabled(dev_priv)) {
2920 engine->semaphore.sync_to = gen8_ring_sync; 2993 engine->semaphore.sync_to = gen8_ring_sync;
2921 engine->semaphore.signal = gen8_xcs_signal; 2994 engine->semaphore.signal = gen8_xcs_signal;
2922 GEN8_RING_SEMAPHORE_INIT(engine); 2995 GEN8_RING_SEMAPHORE_INIT(engine);
@@ -2927,7 +3000,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
2927 engine->irq_put = gen6_ring_put_irq; 3000 engine->irq_put = gen6_ring_put_irq;
2928 engine->dispatch_execbuffer = 3001 engine->dispatch_execbuffer =
2929 gen6_ring_dispatch_execbuffer; 3002 gen6_ring_dispatch_execbuffer;
2930 if (i915_semaphore_is_enabled(dev)) { 3003 if (i915_semaphore_is_enabled(dev_priv)) {
2931 engine->semaphore.sync_to = gen6_ring_sync; 3004 engine->semaphore.sync_to = gen6_ring_sync;
2932 engine->semaphore.signal = gen6_signal; 3005 engine->semaphore.signal = gen6_signal;
2933 engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR; 3006 engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
@@ -2948,7 +3021,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
2948 engine->add_request = i9xx_add_request; 3021 engine->add_request = i9xx_add_request;
2949 engine->get_seqno = ring_get_seqno; 3022 engine->get_seqno = ring_get_seqno;
2950 engine->set_seqno = ring_set_seqno; 3023 engine->set_seqno = ring_set_seqno;
2951 if (IS_GEN5(dev)) { 3024 if (IS_GEN5(dev_priv)) {
2952 engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT; 3025 engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
2953 engine->irq_get = gen5_ring_get_irq; 3026 engine->irq_get = gen5_ring_get_irq;
2954 engine->irq_put = gen5_ring_put_irq; 3027 engine->irq_put = gen5_ring_put_irq;
@@ -2990,7 +3063,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
2990 engine->irq_put = gen8_ring_put_irq; 3063 engine->irq_put = gen8_ring_put_irq;
2991 engine->dispatch_execbuffer = 3064 engine->dispatch_execbuffer =
2992 gen8_ring_dispatch_execbuffer; 3065 gen8_ring_dispatch_execbuffer;
2993 if (i915_semaphore_is_enabled(dev)) { 3066 if (i915_semaphore_is_enabled(dev_priv)) {
2994 engine->semaphore.sync_to = gen8_ring_sync; 3067 engine->semaphore.sync_to = gen8_ring_sync;
2995 engine->semaphore.signal = gen8_xcs_signal; 3068 engine->semaphore.signal = gen8_xcs_signal;
2996 GEN8_RING_SEMAPHORE_INIT(engine); 3069 GEN8_RING_SEMAPHORE_INIT(engine);
@@ -3017,13 +3090,13 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
3017 engine->irq_seqno_barrier = gen6_seqno_barrier; 3090 engine->irq_seqno_barrier = gen6_seqno_barrier;
3018 engine->get_seqno = ring_get_seqno; 3091 engine->get_seqno = ring_get_seqno;
3019 engine->set_seqno = ring_set_seqno; 3092 engine->set_seqno = ring_set_seqno;
3020 if (INTEL_INFO(dev)->gen >= 8) { 3093 if (INTEL_GEN(dev_priv) >= 8) {
3021 engine->irq_enable_mask = 3094 engine->irq_enable_mask =
3022 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT; 3095 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
3023 engine->irq_get = gen8_ring_get_irq; 3096 engine->irq_get = gen8_ring_get_irq;
3024 engine->irq_put = gen8_ring_put_irq; 3097 engine->irq_put = gen8_ring_put_irq;
3025 engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; 3098 engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
3026 if (i915_semaphore_is_enabled(dev)) { 3099 if (i915_semaphore_is_enabled(dev_priv)) {
3027 engine->semaphore.sync_to = gen8_ring_sync; 3100 engine->semaphore.sync_to = gen8_ring_sync;
3028 engine->semaphore.signal = gen8_xcs_signal; 3101 engine->semaphore.signal = gen8_xcs_signal;
3029 GEN8_RING_SEMAPHORE_INIT(engine); 3102 GEN8_RING_SEMAPHORE_INIT(engine);
@@ -3033,7 +3106,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
3033 engine->irq_get = gen6_ring_get_irq; 3106 engine->irq_get = gen6_ring_get_irq;
3034 engine->irq_put = gen6_ring_put_irq; 3107 engine->irq_put = gen6_ring_put_irq;
3035 engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 3108 engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
3036 if (i915_semaphore_is_enabled(dev)) { 3109 if (i915_semaphore_is_enabled(dev_priv)) {
3037 engine->semaphore.signal = gen6_signal; 3110 engine->semaphore.signal = gen6_signal;
3038 engine->semaphore.sync_to = gen6_ring_sync; 3111 engine->semaphore.sync_to = gen6_ring_sync;
3039 /* 3112 /*
@@ -3078,13 +3151,13 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
3078 engine->get_seqno = ring_get_seqno; 3151 engine->get_seqno = ring_get_seqno;
3079 engine->set_seqno = ring_set_seqno; 3152 engine->set_seqno = ring_set_seqno;
3080 3153
3081 if (INTEL_INFO(dev)->gen >= 8) { 3154 if (INTEL_GEN(dev_priv) >= 8) {
3082 engine->irq_enable_mask = 3155 engine->irq_enable_mask =
3083 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT; 3156 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
3084 engine->irq_get = gen8_ring_get_irq; 3157 engine->irq_get = gen8_ring_get_irq;
3085 engine->irq_put = gen8_ring_put_irq; 3158 engine->irq_put = gen8_ring_put_irq;
3086 engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; 3159 engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
3087 if (i915_semaphore_is_enabled(dev)) { 3160 if (i915_semaphore_is_enabled(dev_priv)) {
3088 engine->semaphore.sync_to = gen8_ring_sync; 3161 engine->semaphore.sync_to = gen8_ring_sync;
3089 engine->semaphore.signal = gen8_xcs_signal; 3162 engine->semaphore.signal = gen8_xcs_signal;
3090 GEN8_RING_SEMAPHORE_INIT(engine); 3163 GEN8_RING_SEMAPHORE_INIT(engine);
@@ -3094,7 +3167,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
3094 engine->irq_get = hsw_vebox_get_irq; 3167 engine->irq_get = hsw_vebox_get_irq;
3095 engine->irq_put = hsw_vebox_put_irq; 3168 engine->irq_put = hsw_vebox_put_irq;
3096 engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 3169 engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
3097 if (i915_semaphore_is_enabled(dev)) { 3170 if (i915_semaphore_is_enabled(dev_priv)) {
3098 engine->semaphore.sync_to = gen6_ring_sync; 3171 engine->semaphore.sync_to = gen6_ring_sync;
3099 engine->semaphore.signal = gen6_signal; 3172 engine->semaphore.signal = gen6_signal;
3100 engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER; 3173 engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
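
The ring-init hunks above repeat one mechanical change per engine: helpers that used to take a struct drm_device now take struct drm_i915_private directly (i915_semaphore_is_enabled(), IS_GEN5(), INTEL_GEN()), while the per-generation vtable wiring itself is unchanged. A minimal standalone sketch of that wiring pattern follows; fake_i915, fake_engine and the gen-8 threshold are hypothetical stand-ins for the driver's real types, not its API.

#include <stdio.h>

struct fake_i915 { int gen; };

struct fake_engine {
	struct fake_i915 *i915;	/* engines now point at the i915 device */
	void (*irq_get)(struct fake_engine *);
	void (*irq_put)(struct fake_engine *);
};

static void gen8_irq_get(struct fake_engine *e) { (void)e; puts("gen8 irq_get"); }
static void gen8_irq_put(struct fake_engine *e) { (void)e; puts("gen8 irq_put"); }
static void gen6_irq_get(struct fake_engine *e) { (void)e; puts("gen6 irq_get"); }
static void gen6_irq_put(struct fake_engine *e) { (void)e; puts("gen6 irq_put"); }

/* Pick function pointers once at init, keyed off the device, not the DRM dev. */
static void engine_init(struct fake_engine *e, struct fake_i915 *i915)
{
	e->i915 = i915;
	if (i915->gen >= 8) {
		e->irq_get = gen8_irq_get;
		e->irq_put = gen8_irq_put;
	} else {
		e->irq_get = gen6_irq_get;
		e->irq_put = gen6_irq_put;
	}
}

int main(void)
{
	struct fake_i915 i915 = { .gen = 8 };
	struct fake_engine engine;

	engine_init(&engine, &i915);
	engine.irq_get(&engine);
	engine.irq_put(&engine);
	return 0;
}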
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index ff126485d398..b33c876fed20 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -107,7 +107,6 @@ struct intel_ringbuffer {
107 int space; 107 int space;
108 int size; 108 int size;
109 int effective_size; 109 int effective_size;
110 int reserved_size;
111 110
112 /** We track the position of the requests in the ring buffer, and 111 /** We track the position of the requests in the ring buffer, and
113 * when each is retired we increment last_retired_head as the GPU 112 * when each is retired we increment last_retired_head as the GPU
@@ -120,7 +119,7 @@ struct intel_ringbuffer {
120 u32 last_retired_head; 119 u32 last_retired_head;
121}; 120};
122 121
123struct intel_context; 122struct i915_gem_context;
124struct drm_i915_reg_table; 123struct drm_i915_reg_table;
125 124
126/* 125/*
@@ -142,7 +141,8 @@ struct i915_ctx_workarounds {
142 struct drm_i915_gem_object *obj; 141 struct drm_i915_gem_object *obj;
143}; 142};
144 143
145struct intel_engine_cs { 144struct intel_engine_cs {
145 struct drm_i915_private *i915;
146 const char *name; 146 const char *name;
147 enum intel_engine_id { 147 enum intel_engine_id {
148 RCS = 0, 148 RCS = 0,
@@ -157,7 +157,6 @@ struct intel_engine_cs {
157 unsigned int hw_id; 157 unsigned int hw_id;
158 unsigned int guc_id; /* XXX same as hw_id? */ 158 unsigned int guc_id; /* XXX same as hw_id? */
159 u32 mmio_base; 159 u32 mmio_base;
160 struct drm_device *dev;
161 struct intel_ringbuffer *buffer; 160 struct intel_ringbuffer *buffer;
162 struct list_head buffers; 161 struct list_head buffers;
163 162
@@ -268,7 +267,6 @@ struct intel_engine_cs {
268 struct tasklet_struct irq_tasklet; 267 struct tasklet_struct irq_tasklet;
269 spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */ 268 spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */
270 struct list_head execlist_queue; 269 struct list_head execlist_queue;
271 struct list_head execlist_retired_req_list;
272 unsigned int fw_domains; 270 unsigned int fw_domains;
273 unsigned int next_context_status_buffer; 271 unsigned int next_context_status_buffer;
274 unsigned int idle_lite_restore_wa; 272 unsigned int idle_lite_restore_wa;
@@ -312,7 +310,7 @@ struct intel_engine_cs {
312 310
313 wait_queue_head_t irq_queue; 311 wait_queue_head_t irq_queue;
314 312
315 struct intel_context *last_context; 313 struct i915_gem_context *last_context;
316 314
317 struct intel_ring_hangcheck hangcheck; 315 struct intel_ring_hangcheck hangcheck;
318 316
@@ -352,7 +350,7 @@ struct intel_engine_cs {
352static inline bool 350static inline bool
353intel_engine_initialized(struct intel_engine_cs *engine) 351intel_engine_initialized(struct intel_engine_cs *engine)
354{ 352{
355 return engine->dev != NULL; 353 return engine->i915 != NULL;
356} 354}
357 355
358static inline unsigned 356static inline unsigned
@@ -427,7 +425,7 @@ intel_write_status_page(struct intel_engine_cs *engine,
427 425
428struct intel_ringbuffer * 426struct intel_ringbuffer *
429intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size); 427intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
430int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, 428int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
431 struct intel_ringbuffer *ringbuf); 429 struct intel_ringbuffer *ringbuf);
432void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf); 430void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
433void intel_ringbuffer_free(struct intel_ringbuffer *ring); 431void intel_ringbuffer_free(struct intel_ringbuffer *ring);
@@ -486,26 +484,15 @@ static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
486/* 484/*
487 * Arbitrary size for largest possible 'add request' sequence. The code paths 485 * Arbitrary size for largest possible 'add request' sequence. The code paths
488 * are complex and variable. Empirical measurement shows that the worst case 486 * are complex and variable. Empirical measurement shows that the worst case
489 * is ILK at 136 words. Reserving too much is better than reserving too little 487 * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
490 * as that allows for corner cases that might have been missed. So the figure 488 * we need to allocate double the largest single packet within that emission
491 * has been rounded up to 160 words. 489 * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW).
492 */ 490 */
493#define MIN_SPACE_FOR_ADD_REQUEST 160 491#define MIN_SPACE_FOR_ADD_REQUEST 336
494 492
495/* 493static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
496 * Reserve space in the ring to guarantee that the i915_add_request() call 494{
497 * will always have sufficient room to do its stuff. The request creation 495 return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
498 * code calls this automatically. 496}
499 */
500void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size);
501/* Cancel the reservation, e.g. because the request is being discarded. */
502void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf);
503/* Use the reserved space - for use by i915_add_request() only. */
504void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf);
505/* Finish with the reserved space - for use by i915_add_request() only. */
506void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf);
507
508/* Legacy ringbuffer specific portion of reservation code: */
509int intel_ring_reserve_space(struct drm_i915_gem_request *request);
510 497
511#endif /* _INTEL_RINGBUFFER_H_ */ 498#endif /* _INTEL_RINGBUFFER_H_ */
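
The new MIN_SPACE_FOR_ADD_REQUEST value falls straight out of the comment's arithmetic: the worst-case emission is 6 + 6 + 36 dwords, the largest single packet (36 dwords) is doubled to survive a tail wraparound, and the resulting 84 dwords is 336 bytes. A small sketch of that arithmetic; the 6/6/36 split is taken from the comment above, and the macro name is illustrative.

#include <assert.h>
#include <stdio.h>

/* One GPU command dword is 4 bytes. */
#define DWORDS_TO_BYTES(n) ((n) * 4)

int main(void)
{
	/* Worst-case add-request emission on BDW: 6 + 6 + 36 dwords. */
	int flush = 6, workaround = 6, largest_packet = 36;

	/* Double the largest single packet to survive a tail wraparound. */
	int reserved = DWORDS_TO_BYTES(flush + workaround + 2 * largest_packet);

	assert(reserved == 336); /* matches MIN_SPACE_FOR_ADD_REQUEST */
	printf("reserved = %d bytes\n", reserved);
	return 0;
}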
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 7fb1da4e7fc3..e856d49d6dc3 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -65,6 +65,9 @@
65bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, 65bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
66 int power_well_id); 66 int power_well_id);
67 67
68static struct i915_power_well *
69lookup_power_well(struct drm_i915_private *dev_priv, int power_well_id);
70
68const char * 71const char *
69intel_display_power_domain_str(enum intel_display_power_domain domain) 72intel_display_power_domain_str(enum intel_display_power_domain domain)
70{ 73{
@@ -151,6 +154,23 @@ static void intel_power_well_disable(struct drm_i915_private *dev_priv,
151 power_well->ops->disable(dev_priv, power_well); 154 power_well->ops->disable(dev_priv, power_well);
152} 155}
153 156
157static void intel_power_well_get(struct drm_i915_private *dev_priv,
158 struct i915_power_well *power_well)
159{
160 if (!power_well->count++)
161 intel_power_well_enable(dev_priv, power_well);
162}
163
164static void intel_power_well_put(struct drm_i915_private *dev_priv,
165 struct i915_power_well *power_well)
166{
167 WARN(!power_well->count, "Use count on power well %s is already zero",
168 power_well->name);
169
170 if (!--power_well->count)
171 intel_power_well_disable(dev_priv, power_well);
172}
173
154/* 174/*
155 * We should only use the power well if we explicitly asked the hardware to 175 * We should only use the power well if we explicitly asked the hardware to
156 * enable it, so check if it's enabled and also check if we've requested it to 176 * enable it, so check if it's enabled and also check if we've requested it to
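
The new intel_power_well_get()/intel_power_well_put() pair centralises the enable refcount that was previously open-coded at each call site (see the __intel_display_power_get_domain() and intel_display_power_put() hunks later in this file). A minimal sketch of the pattern, with stand-in types rather than the driver's:

#include <assert.h>
#include <stdio.h>

struct well {
	const char *name;
	int count;      /* enable refcount */
	int hw_enabled; /* pretend hardware state */
};

/* First user powers the well up. */
static void well_get(struct well *w)
{
	if (!w->count++)
		w->hw_enabled = 1;
}

/* Last user powers it down; underflow is a caller bug, as the WARN says. */
static void well_put(struct well *w)
{
	assert(w->count > 0);
	if (!--w->count)
		w->hw_enabled = 0;
}

int main(void)
{
	struct well w = { .name = "dpio-common-a" };

	well_get(&w);  /* powers on */
	well_get(&w);  /* nested: no hardware access */
	well_put(&w);
	well_put(&w);  /* powers off */
	printf("%s enabled=%d count=%d\n", w.name, w.hw_enabled, w.count);
	return 0;
}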
@@ -419,6 +439,16 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
419 BIT(POWER_DOMAIN_MODESET) | \ 439 BIT(POWER_DOMAIN_MODESET) | \
420 BIT(POWER_DOMAIN_AUX_A) | \ 440 BIT(POWER_DOMAIN_AUX_A) | \
421 BIT(POWER_DOMAIN_INIT)) 441 BIT(POWER_DOMAIN_INIT))
442#define BXT_DPIO_CMN_A_POWER_DOMAINS ( \
443 BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \
444 BIT(POWER_DOMAIN_AUX_A) | \
445 BIT(POWER_DOMAIN_INIT))
446#define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \
447 BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
448 BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
449 BIT(POWER_DOMAIN_AUX_B) | \
450 BIT(POWER_DOMAIN_AUX_C) | \
451 BIT(POWER_DOMAIN_INIT))
422 452
423static void assert_can_enable_dc9(struct drm_i915_private *dev_priv) 453static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
424{ 454{
@@ -800,21 +830,99 @@ static void skl_power_well_disable(struct drm_i915_private *dev_priv,
800 skl_set_power_well(dev_priv, power_well, false); 830 skl_set_power_well(dev_priv, power_well, false);
801} 831}
802 832
833static enum dpio_phy bxt_power_well_to_phy(struct i915_power_well *power_well)
834{
835 enum skl_disp_power_wells power_well_id = power_well->data;
836
837 return power_well_id == BXT_DPIO_CMN_A ? DPIO_PHY1 : DPIO_PHY0;
838}
839
840static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
841 struct i915_power_well *power_well)
842{
843 enum skl_disp_power_wells power_well_id = power_well->data;
844 struct i915_power_well *cmn_a_well;
845
846 if (power_well_id == BXT_DPIO_CMN_BC) {
847 /*
848 * We need to copy the GRC calibration value from the eDP PHY,
849 * so make sure it's powered up.
850 */
851 cmn_a_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
852 intel_power_well_get(dev_priv, cmn_a_well);
853 }
854
855 bxt_ddi_phy_init(dev_priv, bxt_power_well_to_phy(power_well));
856
857 if (power_well_id == BXT_DPIO_CMN_BC)
858 intel_power_well_put(dev_priv, cmn_a_well);
859}
860
861static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
862 struct i915_power_well *power_well)
863{
864 bxt_ddi_phy_uninit(dev_priv, bxt_power_well_to_phy(power_well));
865}
866
867static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
868 struct i915_power_well *power_well)
869{
870 return bxt_ddi_phy_is_enabled(dev_priv,
871 bxt_power_well_to_phy(power_well));
872}
873
874static void bxt_dpio_cmn_power_well_sync_hw(struct drm_i915_private *dev_priv,
875 struct i915_power_well *power_well)
876{
877 if (power_well->count > 0)
878 bxt_dpio_cmn_power_well_enable(dev_priv, power_well);
879 else
880 bxt_dpio_cmn_power_well_disable(dev_priv, power_well);
881}
882
883
884static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
885{
886 struct i915_power_well *power_well;
887
888 power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
889 if (power_well->count > 0)
890 bxt_ddi_phy_verify_state(dev_priv,
891 bxt_power_well_to_phy(power_well));
892
893 power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
894 if (power_well->count > 0)
895 bxt_ddi_phy_verify_state(dev_priv,
896 bxt_power_well_to_phy(power_well));
897}
898
803static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv, 899static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
804 struct i915_power_well *power_well) 900 struct i915_power_well *power_well)
805{ 901{
806 return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0; 902 return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
807} 903}
808 904
905static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
906{
907 u32 tmp = I915_READ(DBUF_CTL);
908
909 WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
910 (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
911 "Unexpected DBuf power power state (0x%08x)\n", tmp);
912}
913
809static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv, 914static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
810 struct i915_power_well *power_well) 915 struct i915_power_well *power_well)
811{ 916{
812 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 917 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
813 918
814 if (IS_BROXTON(dev_priv)) { 919 WARN_ON(dev_priv->cdclk_freq !=
815 broxton_cdclk_verify_state(dev_priv); 920 dev_priv->display.get_display_clock_speed(dev_priv->dev));
816 broxton_ddi_phy_verify_state(dev_priv); 921
817 } 922 gen9_assert_dbuf_enabled(dev_priv);
923
924 if (IS_BROXTON(dev_priv))
925 bxt_verify_ddi_phy_power_wells(dev_priv);
818} 926}
819 927
820static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, 928static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
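
bxt_dpio_cmn_power_well_enable() shows why the get/put helpers matter: enabling the BC PHY needs the GRC calibration value from the eDP (A) PHY, so the A well is pinned only for the duration of the copy and released immediately after, and the refcount absorbs the case where A is already in use elsewhere. Sketched below with the same stand-in types as the previous example; the calibration step is reduced to a printf.

#include <stdio.h>

struct well { const char *name; int count; };

static void well_get(struct well *w)
{
	if (!w->count++)
		printf("%s on\n", w->name);
}

static void well_put(struct well *w)
{
	if (!--w->count)
		printf("%s off\n", w->name);
}

/* Pin the donor PHY just long enough to copy its calibration value. */
static void enable_bc(struct well *cmn_a)
{
	well_get(cmn_a);
	puts("copy GRC calibration from A to BC");
	well_put(cmn_a);	/* refcount handles an already-held A well */
}

int main(void)
{
	struct well cmn_a = { .name = "dpio-common-a" };

	enable_bc(&cmn_a);
	return 0;
}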
@@ -948,6 +1056,11 @@ static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
948 */ 1056 */
949 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); 1057 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
950 I915_WRITE(CBR1_VLV, 0); 1058 I915_WRITE(CBR1_VLV, 0);
1059
1060 WARN_ON(dev_priv->rawclk_freq == 0);
1061
1062 I915_WRITE(RAWCLK_FREQ_VLV,
1063 DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
951} 1064}
952 1065
953static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) 1066static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
@@ -1501,10 +1614,8 @@ __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
1501 struct i915_power_well *power_well; 1614 struct i915_power_well *power_well;
1502 int i; 1615 int i;
1503 1616
1504 for_each_power_well(i, power_well, BIT(domain), power_domains) { 1617 for_each_power_well(i, power_well, BIT(domain), power_domains)
1505 if (!power_well->count++) 1618 intel_power_well_get(dev_priv, power_well);
1506 intel_power_well_enable(dev_priv, power_well);
1507 }
1508 1619
1509 power_domains->domain_use_count[domain]++; 1620 power_domains->domain_use_count[domain]++;
1510} 1621}
@@ -1598,14 +1709,8 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
1598 intel_display_power_domain_str(domain)); 1709 intel_display_power_domain_str(domain));
1599 power_domains->domain_use_count[domain]--; 1710 power_domains->domain_use_count[domain]--;
1600 1711
1601 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) { 1712 for_each_power_well_rev(i, power_well, BIT(domain), power_domains)
1602 WARN(!power_well->count, 1713 intel_power_well_put(dev_priv, power_well);
1603 "Use count on power well %s is already zero",
1604 power_well->name);
1605
1606 if (!--power_well->count)
1607 intel_power_well_disable(dev_priv, power_well);
1608 }
1609 1714
1610 mutex_unlock(&power_domains->lock); 1715 mutex_unlock(&power_domains->lock);
1611 1716
@@ -1776,6 +1881,13 @@ static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
1776 .is_enabled = gen9_dc_off_power_well_enabled, 1881 .is_enabled = gen9_dc_off_power_well_enabled,
1777}; 1882};
1778 1883
1884static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
1885 .sync_hw = bxt_dpio_cmn_power_well_sync_hw,
1886 .enable = bxt_dpio_cmn_power_well_enable,
1887 .disable = bxt_dpio_cmn_power_well_disable,
1888 .is_enabled = bxt_dpio_cmn_power_well_enabled,
1889};
1890
1779static struct i915_power_well hsw_power_wells[] = { 1891static struct i915_power_well hsw_power_wells[] = {
1780 { 1892 {
1781 .name = "always-on", 1893 .name = "always-on",
@@ -2012,6 +2124,18 @@ static struct i915_power_well bxt_power_wells[] = {
2012 .ops = &skl_power_well_ops, 2124 .ops = &skl_power_well_ops,
2013 .data = SKL_DISP_PW_2, 2125 .data = SKL_DISP_PW_2,
2014 }, 2126 },
2127 {
2128 .name = "dpio-common-a",
2129 .domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
2130 .ops = &bxt_dpio_cmn_power_well_ops,
2131 .data = BXT_DPIO_CMN_A,
2132 },
2133 {
2134 .name = "dpio-common-bc",
2135 .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
2136 .ops = &bxt_dpio_cmn_power_well_ops,
2137 .data = BXT_DPIO_CMN_BC,
2138 },
2015}; 2139};
2016 2140
2017static int 2141static int
@@ -2171,6 +2295,28 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
2171 mutex_unlock(&power_domains->lock); 2295 mutex_unlock(&power_domains->lock);
2172} 2296}
2173 2297
2298static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
2299{
2300 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
2301 POSTING_READ(DBUF_CTL);
2302
2303 udelay(10);
2304
2305 if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
2306 DRM_ERROR("DBuf power enable timeout\n");
2307}
2308
2309static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
2310{
2311 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
2312 POSTING_READ(DBUF_CTL);
2313
2314 udelay(10);
2315
2316 if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
2317 DRM_ERROR("DBuf power disable timeout!\n");
2318}
2319
2174static void skl_display_core_init(struct drm_i915_private *dev_priv, 2320static void skl_display_core_init(struct drm_i915_private *dev_priv,
2175 bool resume) 2321 bool resume)
2176{ 2322{
@@ -2195,12 +2341,11 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv,
2195 2341
2196 mutex_unlock(&power_domains->lock); 2342 mutex_unlock(&power_domains->lock);
2197 2343
2198 if (!resume)
2199 return;
2200
2201 skl_init_cdclk(dev_priv); 2344 skl_init_cdclk(dev_priv);
2202 2345
2203 if (dev_priv->csr.dmc_payload) 2346 gen9_dbuf_enable(dev_priv);
2347
2348 if (resume && dev_priv->csr.dmc_payload)
2204 intel_csr_load_program(dev_priv); 2349 intel_csr_load_program(dev_priv);
2205} 2350}
2206 2351
@@ -2211,6 +2356,8 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
2211 2356
2212 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 2357 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2213 2358
2359 gen9_dbuf_disable(dev_priv);
2360
2214 skl_uninit_cdclk(dev_priv); 2361 skl_uninit_cdclk(dev_priv);
2215 2362
2216 /* The spec doesn't call for removing the reset handshake flag */ 2363 /* The spec doesn't call for removing the reset handshake flag */
@@ -2254,11 +2401,9 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv,
2254 2401
2255 mutex_unlock(&power_domains->lock); 2402 mutex_unlock(&power_domains->lock);
2256 2403
2257 broxton_init_cdclk(dev_priv); 2404 bxt_init_cdclk(dev_priv);
2258 broxton_ddi_phy_init(dev_priv);
2259 2405
2260 broxton_cdclk_verify_state(dev_priv); 2406 gen9_dbuf_enable(dev_priv);
2261 broxton_ddi_phy_verify_state(dev_priv);
2262 2407
2263 if (resume && dev_priv->csr.dmc_payload) 2408 if (resume && dev_priv->csr.dmc_payload)
2264 intel_csr_load_program(dev_priv); 2409 intel_csr_load_program(dev_priv);
@@ -2271,8 +2416,9 @@ void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
2271 2416
2272 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 2417 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2273 2418
2274 broxton_ddi_phy_uninit(dev_priv); 2419 gen9_dbuf_disable(dev_priv);
2275 broxton_uninit_cdclk(dev_priv); 2420
2421 bxt_uninit_cdclk(dev_priv);
2276 2422
2277 /* The spec doesn't call for removing the reset handshake flag */ 2423 /* The spec doesn't call for removing the reset handshake flag */
2278 2424
@@ -2403,6 +2549,7 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
2403/** 2549/**
2404 * intel_power_domains_init_hw - initialize hardware power domain state 2550 * intel_power_domains_init_hw - initialize hardware power domain state
2405 * @dev_priv: i915 device instance 2551 * @dev_priv: i915 device instance
 2552 * @resume: true if called from a resume code path
2406 * 2553 *
2407 * This function initializes the hardware power domain state and enables all 2554 * This function initializes the hardware power domain state and enables all
2408 * power domains using intel_display_set_init_power(). 2555 * power domains using intel_display_set_init_power().
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 2128fae5687d..02b4a6695528 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2177,12 +2177,23 @@ done:
2177#undef CHECK_PROPERTY 2177#undef CHECK_PROPERTY
2178} 2178}
2179 2179
2180static void
2181intel_sdvo_connector_unregister(struct drm_connector *connector)
2182{
2183 struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
2184
2185 sysfs_remove_link(&connector->kdev->kobj,
2186 sdvo->ddc.dev.kobj.name);
2187 intel_connector_unregister(connector);
2188}
2189
2180static const struct drm_connector_funcs intel_sdvo_connector_funcs = { 2190static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
2181 .dpms = drm_atomic_helper_connector_dpms, 2191 .dpms = drm_atomic_helper_connector_dpms,
2182 .detect = intel_sdvo_detect, 2192 .detect = intel_sdvo_detect,
2183 .fill_modes = drm_helper_probe_single_connector_modes, 2193 .fill_modes = drm_helper_probe_single_connector_modes,
2184 .set_property = intel_sdvo_set_property, 2194 .set_property = intel_sdvo_set_property,
2185 .atomic_get_property = intel_connector_atomic_get_property, 2195 .atomic_get_property = intel_connector_atomic_get_property,
2196 .early_unregister = intel_sdvo_connector_unregister,
2186 .destroy = intel_sdvo_destroy, 2197 .destroy = intel_sdvo_destroy,
2187 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 2198 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
2188 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 2199 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -2191,7 +2202,6 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
2191static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = { 2202static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
2192 .get_modes = intel_sdvo_get_modes, 2203 .get_modes = intel_sdvo_get_modes,
2193 .mode_valid = intel_sdvo_mode_valid, 2204 .mode_valid = intel_sdvo_mode_valid,
2194 .best_encoder = intel_best_encoder,
2195}; 2205};
2196 2206
2197static void intel_sdvo_enc_destroy(struct drm_encoder *encoder) 2207static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
@@ -2346,20 +2356,6 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
2346 return 0x72; 2356 return 0x72;
2347} 2357}
2348 2358
2349static void
2350intel_sdvo_connector_unregister(struct intel_connector *intel_connector)
2351{
2352 struct drm_connector *drm_connector;
2353 struct intel_sdvo *sdvo_encoder;
2354
2355 drm_connector = &intel_connector->base;
2356 sdvo_encoder = intel_attached_sdvo(&intel_connector->base);
2357
2358 sysfs_remove_link(&drm_connector->kdev->kobj,
2359 sdvo_encoder->ddc.dev.kobj.name);
2360 intel_connector_unregister(intel_connector);
2361}
2362
2363static int 2359static int
2364intel_sdvo_connector_init(struct intel_sdvo_connector *connector, 2360intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
2365 struct intel_sdvo *encoder) 2361 struct intel_sdvo *encoder)
@@ -2382,7 +2378,6 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
2382 connector->base.base.doublescan_allowed = 0; 2378 connector->base.base.doublescan_allowed = 0;
2383 connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB; 2379 connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
2384 connector->base.get_hw_state = intel_sdvo_connector_get_hw_state; 2380 connector->base.get_hw_state = intel_sdvo_connector_get_hw_state;
2385 connector->base.unregister = intel_sdvo_connector_unregister;
2386 2381
2387 intel_connector_attach_encoder(&connector->base, &encoder->base); 2382 intel_connector_attach_encoder(&connector->base, &encoder->base);
2388 ret = drm_connector_register(drm_connector); 2383 ret = drm_connector_register(drm_connector);
@@ -2981,7 +2976,7 @@ bool intel_sdvo_init(struct drm_device *dev,
2981 intel_encoder = &intel_sdvo->base; 2976 intel_encoder = &intel_sdvo->base;
2982 intel_encoder->type = INTEL_OUTPUT_SDVO; 2977 intel_encoder->type = INTEL_OUTPUT_SDVO;
2983 drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0, 2978 drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0,
2984 NULL); 2979 "SDVO %c", port_name(port));
2985 2980
2986 /* Read the regs to test if we can talk to the device */ 2981 /* Read the regs to test if we can talk to the device */
2987 for (i = 0; i < 0x40; i++) { 2982 for (i = 0; i < 0x40; i++) {
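
The SDVO change moves the sysfs-link cleanup out of the driver-private intel_connector->unregister hook and into the core drm_connector_funcs.early_unregister callback, which the DRM core invokes before the connector disappears from userspace. A sketch of that hook shape with stand-in types; the real drm_connector_funcs has many more fields.

#include <stdio.h>

struct fake_connector { const char *sysfs_link; };

struct fake_connector_funcs {
	/* runs while the connector is still registered, before teardown */
	void (*early_unregister)(struct fake_connector *);
	void (*destroy)(struct fake_connector *);
};

static void sdvo_early_unregister(struct fake_connector *c)
{
	/* was the open-coded driver unregister hook */
	printf("remove sysfs link %s\n", c->sysfs_link);
}

static void sdvo_destroy(struct fake_connector *c)
{
	(void)c;
	puts("free connector");
}

static const struct fake_connector_funcs funcs = {
	.early_unregister = sdvo_early_unregister,
	.destroy = sdvo_destroy,
};

/* Core teardown calls the optional hook first, then destroys. */
static void connector_unregister(struct fake_connector *c)
{
	if (funcs.early_unregister)
		funcs.early_unregister(c);
	funcs.destroy(c);
}

int main(void)
{
	struct fake_connector c = { .sysfs_link = "i2c-5" };

	connector_unregister(&c);
	return 0;
}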
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 0f3e2303e0e9..fc654173c491 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -80,9 +80,7 @@ static int usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
80 */ 80 */
81void intel_pipe_update_start(struct intel_crtc *crtc) 81void intel_pipe_update_start(struct intel_crtc *crtc)
82{ 82{
83 struct drm_device *dev = crtc->base.dev;
84 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 83 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
85 enum pipe pipe = crtc->pipe;
86 long timeout = msecs_to_jiffies_timeout(1); 84 long timeout = msecs_to_jiffies_timeout(1);
87 int scanline, min, max, vblank_start; 85 int scanline, min, max, vblank_start;
88 wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base); 86 wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
@@ -139,8 +137,7 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
139 137
140 crtc->debug.scanline_start = scanline; 138 crtc->debug.scanline_start = scanline;
141 crtc->debug.start_vbl_time = ktime_get(); 139 crtc->debug.start_vbl_time = ktime_get();
142 crtc->debug.start_vbl_count = 140 crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
143 dev->driver->get_vblank_counter(dev, pipe);
144 141
145 trace_i915_pipe_update_vblank_evaded(crtc); 142 trace_i915_pipe_update_vblank_evaded(crtc);
146} 143}
@@ -154,16 +151,35 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
154 * re-enables interrupts and verifies the update was actually completed 151 * re-enables interrupts and verifies the update was actually completed
155 * before a vblank using the value of @start_vbl_count. 152 * before a vblank using the value of @start_vbl_count.
156 */ 153 */
157void intel_pipe_update_end(struct intel_crtc *crtc) 154void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work)
158{ 155{
159 struct drm_device *dev = crtc->base.dev;
160 enum pipe pipe = crtc->pipe; 156 enum pipe pipe = crtc->pipe;
161 int scanline_end = intel_get_crtc_scanline(crtc); 157 int scanline_end = intel_get_crtc_scanline(crtc);
162 u32 end_vbl_count = dev->driver->get_vblank_counter(dev, pipe); 158 u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc);
163 ktime_t end_vbl_time = ktime_get(); 159 ktime_t end_vbl_time = ktime_get();
164 160
161 if (work) {
162 work->flip_queued_vblank = end_vbl_count;
163 smp_mb__before_atomic();
164 atomic_set(&work->pending, 1);
165 }
166
165 trace_i915_pipe_update_end(crtc, end_vbl_count, scanline_end); 167 trace_i915_pipe_update_end(crtc, end_vbl_count, scanline_end);
166 168
169 /* We're still in the vblank-evade critical section, this can't race.
170 * Would be slightly nice to just grab the vblank count and arm the
171 * event outside of the critical section - the spinlock might spin for a
172 * while ... */
173 if (crtc->base.state->event) {
174 WARN_ON(drm_crtc_vblank_get(&crtc->base) != 0);
175
176 spin_lock(&crtc->base.dev->event_lock);
177 drm_crtc_arm_vblank_event(&crtc->base, crtc->base.state->event);
178 spin_unlock(&crtc->base.dev->event_lock);
179
180 crtc->base.state->event = NULL;
181 }
182
167 local_irq_enable(); 183 local_irq_enable();
168 184
169 if (crtc->debug.start_vbl_count && 185 if (crtc->debug.start_vbl_count &&
@@ -203,8 +219,6 @@ skl_update_plane(struct drm_plane *drm_plane,
203 uint32_t y = plane_state->src.y1 >> 16; 219 uint32_t y = plane_state->src.y1 >> 16;
204 uint32_t src_w = drm_rect_width(&plane_state->src) >> 16; 220 uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
205 uint32_t src_h = drm_rect_height(&plane_state->src) >> 16; 221 uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
206 const struct intel_scaler *scaler =
207 &crtc_state->scaler_state.scalers[plane_state->scaler_id];
208 222
209 plane_ctl = PLANE_CTL_ENABLE | 223 plane_ctl = PLANE_CTL_ENABLE |
210 PLANE_CTL_PIPE_GAMMA_ENABLE | 224 PLANE_CTL_PIPE_GAMMA_ENABLE |
@@ -260,13 +274,16 @@ skl_update_plane(struct drm_plane *drm_plane,
260 274
261 /* program plane scaler */ 275 /* program plane scaler */
262 if (plane_state->scaler_id >= 0) { 276 if (plane_state->scaler_id >= 0) {
263 uint32_t ps_ctrl = 0;
264 int scaler_id = plane_state->scaler_id; 277 int scaler_id = plane_state->scaler_id;
278 const struct intel_scaler *scaler;
265 279
266 DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", plane, 280 DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", plane,
267 PS_PLANE_SEL(plane)); 281 PS_PLANE_SEL(plane));
268 ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane) | scaler->mode; 282
269 I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl); 283 scaler = &crtc_state->scaler_state.scalers[scaler_id];
284
285 I915_WRITE(SKL_PS_CTRL(pipe, scaler_id),
286 PS_SCALER_EN | PS_PLANE_SEL(plane) | scaler->mode);
270 I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0); 287 I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
271 I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y); 288 I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
272 I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), 289 I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id),
@@ -1111,10 +1128,18 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
1111 1128
1112 possible_crtcs = (1 << pipe); 1129 possible_crtcs = (1 << pipe);
1113 1130
1114 ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs, 1131 if (INTEL_INFO(dev)->gen >= 9)
1115 &intel_plane_funcs, 1132 ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
1116 plane_formats, num_plane_formats, 1133 &intel_plane_funcs,
1117 DRM_PLANE_TYPE_OVERLAY, NULL); 1134 plane_formats, num_plane_formats,
1135 DRM_PLANE_TYPE_OVERLAY,
1136 "plane %d%c", plane + 2, pipe_name(pipe));
1137 else
1138 ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
1139 &intel_plane_funcs,
1140 plane_formats, num_plane_formats,
1141 DRM_PLANE_TYPE_OVERLAY,
1142 "sprite %c", sprite_name(pipe, plane));
1118 if (ret) 1143 if (ret)
1119 goto fail; 1144 goto fail;
1120 1145
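
drm_universal_plane_init() and drm_encoder_init() now take a printf-style name ("plane 1A", "sprite B", "SDVO B", "TV"), which is why the sprite init above splits into gen9 and pre-gen9 calls. The naming mechanism itself is just a vsnprintf() into the object; a sketch, where fake_plane and plane_init are stand-ins rather than the DRM API:

#include <stdarg.h>
#include <stdio.h>

struct fake_plane { char name[32]; };

/* Printf-style naming at init time, mirroring the signature change above. */
static int plane_init(struct fake_plane *p, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(p->name, sizeof(p->name), fmt, ap);
	va_end(ap);
	return 0;
}

int main(void)
{
	struct fake_plane a, b;
	int gen = 9, plane = 0;
	char pipe = 'A';

	if (gen >= 9)
		plane_init(&a, "plane %d%c", plane + 2, pipe);
	else
		plane_init(&a, "sprite %c", pipe);
	plane_init(&b, "plane %d%c", plane + 3, 'B');
	printf("%s / %s\n", a.name, b.name);
	return 0;
}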
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 223129d3c765..4ce70a9f9df2 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1501,6 +1501,7 @@ out:
1501static const struct drm_connector_funcs intel_tv_connector_funcs = { 1501static const struct drm_connector_funcs intel_tv_connector_funcs = {
1502 .dpms = drm_atomic_helper_connector_dpms, 1502 .dpms = drm_atomic_helper_connector_dpms,
1503 .detect = intel_tv_detect, 1503 .detect = intel_tv_detect,
1504 .early_unregister = intel_connector_unregister,
1504 .destroy = intel_tv_destroy, 1505 .destroy = intel_tv_destroy,
1505 .set_property = intel_tv_set_property, 1506 .set_property = intel_tv_set_property,
1506 .atomic_get_property = intel_connector_atomic_get_property, 1507 .atomic_get_property = intel_connector_atomic_get_property,
@@ -1512,7 +1513,6 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = {
1512static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = { 1513static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
1513 .mode_valid = intel_tv_mode_valid, 1514 .mode_valid = intel_tv_mode_valid,
1514 .get_modes = intel_tv_get_modes, 1515 .get_modes = intel_tv_get_modes,
1515 .best_encoder = intel_best_encoder,
1516}; 1516};
1517 1517
1518static const struct drm_encoder_funcs intel_tv_enc_funcs = { 1518static const struct drm_encoder_funcs intel_tv_enc_funcs = {
@@ -1591,7 +1591,7 @@ intel_tv_init(struct drm_device *dev)
1591 DRM_MODE_CONNECTOR_SVIDEO); 1591 DRM_MODE_CONNECTOR_SVIDEO);
1592 1592
1593 drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs, 1593 drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
1594 DRM_MODE_ENCODER_TVDAC, NULL); 1594 DRM_MODE_ENCODER_TVDAC, "TV");
1595 1595
1596 intel_encoder->compute_config = intel_tv_compute_config; 1596 intel_encoder->compute_config = intel_tv_compute_config;
1597 intel_encoder->get_config = intel_tv_get_config; 1597 intel_encoder->get_config = intel_tv_get_config;
@@ -1600,7 +1600,6 @@ intel_tv_init(struct drm_device *dev)
1600 intel_encoder->disable = intel_disable_tv; 1600 intel_encoder->disable = intel_disable_tv;
1601 intel_encoder->get_hw_state = intel_tv_get_hw_state; 1601 intel_encoder->get_hw_state = intel_tv_get_hw_state;
1602 intel_connector->get_hw_state = intel_connector_get_hw_state; 1602 intel_connector->get_hw_state = intel_connector_get_hw_state;
1603 intel_connector->unregister = intel_connector_unregister;
1604 1603
1605 intel_connector_attach_encoder(intel_connector, intel_encoder); 1604 intel_connector_attach_encoder(intel_connector, intel_encoder);
1606 intel_encoder->type = INTEL_OUTPUT_TVOUT; 1605 intel_encoder->type = INTEL_OUTPUT_TVOUT;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 4f1dfe616856..c1ca458d688e 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -248,9 +248,9 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
248 return HRTIMER_NORESTART; 248 return HRTIMER_NORESTART;
249} 249}
250 250
251void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore) 251void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
252 bool restore)
252{ 253{
253 struct drm_i915_private *dev_priv = dev->dev_private;
254 unsigned long irqflags; 254 unsigned long irqflags;
255 struct intel_uncore_forcewake_domain *domain; 255 struct intel_uncore_forcewake_domain *domain;
256 int retry_count = 100; 256 int retry_count = 100;
@@ -304,7 +304,7 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
304 if (fw) 304 if (fw)
305 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw); 305 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
306 306
307 if (IS_GEN6(dev) || IS_GEN7(dev)) 307 if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
308 dev_priv->uncore.fifo_count = 308 dev_priv->uncore.fifo_count =
309 fifo_free_entries(dev_priv); 309 fifo_free_entries(dev_priv);
310 } 310 }
@@ -400,43 +400,42 @@ check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
400 return false; 400 return false;
401} 401}
402 402
403static void __intel_uncore_early_sanitize(struct drm_device *dev, 403static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
404 bool restore_forcewake) 404 bool restore_forcewake)
405{ 405{
406 struct drm_i915_private *dev_priv = dev->dev_private;
407
408 /* clear out unclaimed reg detection bit */ 406 /* clear out unclaimed reg detection bit */
409 if (check_for_unclaimed_mmio(dev_priv)) 407 if (check_for_unclaimed_mmio(dev_priv))
410 DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n"); 408 DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
411 409
412 /* clear out old GT FIFO errors */ 410 /* clear out old GT FIFO errors */
413 if (IS_GEN6(dev) || IS_GEN7(dev)) 411 if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
414 __raw_i915_write32(dev_priv, GTFIFODBG, 412 __raw_i915_write32(dev_priv, GTFIFODBG,
415 __raw_i915_read32(dev_priv, GTFIFODBG)); 413 __raw_i915_read32(dev_priv, GTFIFODBG));
416 414
417 /* WaDisableShadowRegForCpd:chv */ 415 /* WaDisableShadowRegForCpd:chv */
418 if (IS_CHERRYVIEW(dev)) { 416 if (IS_CHERRYVIEW(dev_priv)) {
419 __raw_i915_write32(dev_priv, GTFIFOCTL, 417 __raw_i915_write32(dev_priv, GTFIFOCTL,
420 __raw_i915_read32(dev_priv, GTFIFOCTL) | 418 __raw_i915_read32(dev_priv, GTFIFOCTL) |
421 GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL | 419 GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
422 GT_FIFO_CTL_RC6_POLICY_STALL); 420 GT_FIFO_CTL_RC6_POLICY_STALL);
423 } 421 }
424 422
425 intel_uncore_forcewake_reset(dev, restore_forcewake); 423 intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
426} 424}
427 425
428void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake) 426void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
427 bool restore_forcewake)
429{ 428{
430 __intel_uncore_early_sanitize(dev, restore_forcewake); 429 __intel_uncore_early_sanitize(dev_priv, restore_forcewake);
431 i915_check_and_clear_faults(dev); 430 i915_check_and_clear_faults(dev_priv);
432} 431}
433 432
434void intel_uncore_sanitize(struct drm_device *dev) 433void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
435{ 434{
436 i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6); 435 i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);
437 436
438 /* BIOS often leaves RC6 enabled, but disable it for hw init */ 437 /* BIOS often leaves RC6 enabled, but disable it for hw init */
439 intel_disable_gt_powersave(dev); 438 intel_disable_gt_powersave(dev_priv);
440} 439}
441 440
442static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, 441static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
@@ -1233,14 +1232,12 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
1233 fw_domain_reset(d); 1232 fw_domain_reset(d);
1234} 1233}
1235 1234
1236static void intel_uncore_fw_domains_init(struct drm_device *dev) 1235static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
1237{ 1236{
1238 struct drm_i915_private *dev_priv = dev->dev_private;
1239
1240 if (INTEL_INFO(dev_priv)->gen <= 5) 1237 if (INTEL_INFO(dev_priv)->gen <= 5)
1241 return; 1238 return;
1242 1239
1243 if (IS_GEN9(dev)) { 1240 if (IS_GEN9(dev_priv)) {
1244 dev_priv->uncore.funcs.force_wake_get = fw_domains_get; 1241 dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
1245 dev_priv->uncore.funcs.force_wake_put = fw_domains_put; 1242 dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1246 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, 1243 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
@@ -1251,9 +1248,9 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
1251 FORCEWAKE_ACK_BLITTER_GEN9); 1248 FORCEWAKE_ACK_BLITTER_GEN9);
1252 fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA, 1249 fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
1253 FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9); 1250 FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
1254 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { 1251 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1255 dev_priv->uncore.funcs.force_wake_get = fw_domains_get; 1252 dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
1256 if (!IS_CHERRYVIEW(dev)) 1253 if (!IS_CHERRYVIEW(dev_priv))
1257 dev_priv->uncore.funcs.force_wake_put = 1254 dev_priv->uncore.funcs.force_wake_put =
1258 fw_domains_put_with_fifo; 1255 fw_domains_put_with_fifo;
1259 else 1256 else
@@ -1262,17 +1259,17 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
1262 FORCEWAKE_VLV, FORCEWAKE_ACK_VLV); 1259 FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
1263 fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA, 1260 fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
1264 FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV); 1261 FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
1265 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 1262 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
1266 dev_priv->uncore.funcs.force_wake_get = 1263 dev_priv->uncore.funcs.force_wake_get =
1267 fw_domains_get_with_thread_status; 1264 fw_domains_get_with_thread_status;
1268 if (IS_HASWELL(dev)) 1265 if (IS_HASWELL(dev_priv))
1269 dev_priv->uncore.funcs.force_wake_put = 1266 dev_priv->uncore.funcs.force_wake_put =
1270 fw_domains_put_with_fifo; 1267 fw_domains_put_with_fifo;
1271 else 1268 else
1272 dev_priv->uncore.funcs.force_wake_put = fw_domains_put; 1269 dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1273 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, 1270 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1274 FORCEWAKE_MT, FORCEWAKE_ACK_HSW); 1271 FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
1275 } else if (IS_IVYBRIDGE(dev)) { 1272 } else if (IS_IVYBRIDGE(dev_priv)) {
1276 u32 ecobus; 1273 u32 ecobus;
1277 1274
1278 /* IVB configs may use multi-threaded forcewake */ 1275 /* IVB configs may use multi-threaded forcewake */
@@ -1302,11 +1299,9 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
1302 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, 1299 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1303 FORCEWAKE_MT, FORCEWAKE_MT_ACK); 1300 FORCEWAKE_MT, FORCEWAKE_MT_ACK);
1304 1301
1305 mutex_lock(&dev->struct_mutex);
1306 fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL); 1302 fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
1307 ecobus = __raw_i915_read32(dev_priv, ECOBUS); 1303 ecobus = __raw_i915_read32(dev_priv, ECOBUS);
1308 fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL); 1304 fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
1309 mutex_unlock(&dev->struct_mutex);
1310 1305
1311 if (!(ecobus & FORCEWAKE_MT_ENABLE)) { 1306 if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
1312 DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n"); 1307 DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
@@ -1314,7 +1309,7 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
1314 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, 1309 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1315 FORCEWAKE, FORCEWAKE_ACK); 1310 FORCEWAKE, FORCEWAKE_ACK);
1316 } 1311 }
1317 } else if (IS_GEN6(dev)) { 1312 } else if (IS_GEN6(dev_priv)) {
1318 dev_priv->uncore.funcs.force_wake_get = 1313 dev_priv->uncore.funcs.force_wake_get =
1319 fw_domains_get_with_thread_status; 1314 fw_domains_get_with_thread_status;
1320 dev_priv->uncore.funcs.force_wake_put = 1315 dev_priv->uncore.funcs.force_wake_put =
@@ -1327,26 +1322,24 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
1327 WARN_ON(dev_priv->uncore.fw_domains == 0); 1322 WARN_ON(dev_priv->uncore.fw_domains == 0);
1328} 1323}
1329 1324
1330void intel_uncore_init(struct drm_device *dev) 1325void intel_uncore_init(struct drm_i915_private *dev_priv)
1331{ 1326{
1332 struct drm_i915_private *dev_priv = dev->dev_private; 1327 i915_check_vgpu(dev_priv);
1333
1334 i915_check_vgpu(dev);
1335 1328
1336 intel_uncore_edram_detect(dev_priv); 1329 intel_uncore_edram_detect(dev_priv);
1337 intel_uncore_fw_domains_init(dev); 1330 intel_uncore_fw_domains_init(dev_priv);
1338 __intel_uncore_early_sanitize(dev, false); 1331 __intel_uncore_early_sanitize(dev_priv, false);
1339 1332
1340 dev_priv->uncore.unclaimed_mmio_check = 1; 1333 dev_priv->uncore.unclaimed_mmio_check = 1;
1341 1334
1342 switch (INTEL_INFO(dev)->gen) { 1335 switch (INTEL_INFO(dev_priv)->gen) {
1343 default: 1336 default:
1344 case 9: 1337 case 9:
1345 ASSIGN_WRITE_MMIO_VFUNCS(gen9); 1338 ASSIGN_WRITE_MMIO_VFUNCS(gen9);
1346 ASSIGN_READ_MMIO_VFUNCS(gen9); 1339 ASSIGN_READ_MMIO_VFUNCS(gen9);
1347 break; 1340 break;
1348 case 8: 1341 case 8:
1349 if (IS_CHERRYVIEW(dev)) { 1342 if (IS_CHERRYVIEW(dev_priv)) {
1350 ASSIGN_WRITE_MMIO_VFUNCS(chv); 1343 ASSIGN_WRITE_MMIO_VFUNCS(chv);
1351 ASSIGN_READ_MMIO_VFUNCS(chv); 1344 ASSIGN_READ_MMIO_VFUNCS(chv);
1352 1345
@@ -1357,13 +1350,13 @@ void intel_uncore_init(struct drm_device *dev)
1357 break; 1350 break;
1358 case 7: 1351 case 7:
1359 case 6: 1352 case 6:
1360 if (IS_HASWELL(dev)) { 1353 if (IS_HASWELL(dev_priv)) {
1361 ASSIGN_WRITE_MMIO_VFUNCS(hsw); 1354 ASSIGN_WRITE_MMIO_VFUNCS(hsw);
1362 } else { 1355 } else {
1363 ASSIGN_WRITE_MMIO_VFUNCS(gen6); 1356 ASSIGN_WRITE_MMIO_VFUNCS(gen6);
1364 } 1357 }
1365 1358
1366 if (IS_VALLEYVIEW(dev)) { 1359 if (IS_VALLEYVIEW(dev_priv)) {
1367 ASSIGN_READ_MMIO_VFUNCS(vlv); 1360 ASSIGN_READ_MMIO_VFUNCS(vlv);
1368 } else { 1361 } else {
1369 ASSIGN_READ_MMIO_VFUNCS(gen6); 1362 ASSIGN_READ_MMIO_VFUNCS(gen6);
@@ -1381,24 +1374,24 @@ void intel_uncore_init(struct drm_device *dev)
1381 break; 1374 break;
1382 } 1375 }
1383 1376
1384 if (intel_vgpu_active(dev)) { 1377 if (intel_vgpu_active(dev_priv)) {
1385 ASSIGN_WRITE_MMIO_VFUNCS(vgpu); 1378 ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
1386 ASSIGN_READ_MMIO_VFUNCS(vgpu); 1379 ASSIGN_READ_MMIO_VFUNCS(vgpu);
1387 } 1380 }
1388 1381
1389 i915_check_and_clear_faults(dev); 1382 i915_check_and_clear_faults(dev_priv);
1390} 1383}
1391#undef ASSIGN_WRITE_MMIO_VFUNCS 1384#undef ASSIGN_WRITE_MMIO_VFUNCS
1392#undef ASSIGN_READ_MMIO_VFUNCS 1385#undef ASSIGN_READ_MMIO_VFUNCS
1393 1386
1394void intel_uncore_fini(struct drm_device *dev) 1387void intel_uncore_fini(struct drm_i915_private *dev_priv)
1395{ 1388{
1396 /* Paranoia: make sure we have disabled everything before we exit. */ 1389 /* Paranoia: make sure we have disabled everything before we exit. */
1397 intel_uncore_sanitize(dev); 1390 intel_uncore_sanitize(dev_priv);
1398 intel_uncore_forcewake_reset(dev, false); 1391 intel_uncore_forcewake_reset(dev_priv, false);
1399} 1392}
1400 1393
1401#define GEN_RANGE(l, h) GENMASK(h, l) 1394#define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1)
1402 1395
1403static const struct register_whitelist { 1396static const struct register_whitelist {
1404 i915_reg_t offset_ldw, offset_udw; 1397 i915_reg_t offset_ldw, offset_udw;
@@ -1423,7 +1416,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
1423 1416
1424 for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { 1417 for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
1425 if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) && 1418 if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
1426 (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask)) 1419 (INTEL_INFO(dev)->gen_mask & entry->gen_bitmask))
1427 break; 1420 break;
1428 } 1421 }
1429 1422
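
The GEN_RANGE() change pairs with the new gen_mask test in the whitelist lookup above: each device precomputes a one-hot mask with gen N at bit N-1, so matching an entry becomes a single AND instead of computing 1 << gen at lookup time. A worked check of the shifted-by-one encoding; GENMASK here is a local reimplementation of the kernel macro for the sketch.

#include <assert.h>
#include <stdio.h>

/* GENMASK(h, l): bits l..h set, as in include/linux/bits.h. */
#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

/* After the change above, GEN_RANGE(l, h) produces a mask of
 * generation bits, where gen N occupies bit N-1. */
#define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1)

int main(void)
{
	unsigned int gen_bitmask = GEN_RANGE(4, 9); /* entry valid on gens 4-9 */
	int gen = 7;
	unsigned int gen_mask = 1u << (gen - 1); /* device's precomputed mask */

	/* One AND replaces the old "1 << gen" computed per lookup. */
	assert(gen_mask & gen_bitmask);
	assert(!((1u << (3 - 1)) & gen_bitmask)); /* gen 3 not whitelisted */
	printf("gen %d allowed\n", gen);
	return 0;
}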
@@ -1467,83 +1460,47 @@ out:
1467 return ret; 1460 return ret;
1468} 1461}
1469 1462
1470int i915_get_reset_stats_ioctl(struct drm_device *dev, 1463static int i915_reset_complete(struct pci_dev *pdev)
1471 void *data, struct drm_file *file)
1472{
1473 struct drm_i915_private *dev_priv = dev->dev_private;
1474 struct drm_i915_reset_stats *args = data;
1475 struct i915_ctx_hang_stats *hs;
1476 struct intel_context *ctx;
1477 int ret;
1478
1479 if (args->flags || args->pad)
1480 return -EINVAL;
1481
1482 if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
1483 return -EPERM;
1484
1485 ret = mutex_lock_interruptible(&dev->struct_mutex);
1486 if (ret)
1487 return ret;
1488
1489 ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
1490 if (IS_ERR(ctx)) {
1491 mutex_unlock(&dev->struct_mutex);
1492 return PTR_ERR(ctx);
1493 }
1494 hs = &ctx->hang_stats;
1495
1496 if (capable(CAP_SYS_ADMIN))
1497 args->reset_count = i915_reset_count(&dev_priv->gpu_error);
1498 else
1499 args->reset_count = 0;
1500
1501 args->batch_active = hs->batch_active;
1502 args->batch_pending = hs->batch_pending;
1503
1504 mutex_unlock(&dev->struct_mutex);
1505
1506 return 0;
1507}
1508
1509static int i915_reset_complete(struct drm_device *dev)
1510{ 1464{
1511 u8 gdrst; 1465 u8 gdrst;
1512 pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst); 1466 pci_read_config_byte(pdev, I915_GDRST, &gdrst);
1513 return (gdrst & GRDOM_RESET_STATUS) == 0; 1467 return (gdrst & GRDOM_RESET_STATUS) == 0;
1514} 1468}
1515 1469
1516static int i915_do_reset(struct drm_device *dev, unsigned engine_mask) 1470static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1517{ 1471{
1472 struct pci_dev *pdev = dev_priv->dev->pdev;
1473
1518 /* assert reset for at least 20 usec */ 1474 /* assert reset for at least 20 usec */
1519 pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE); 1475 pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
1520 udelay(20); 1476 udelay(20);
1521 pci_write_config_byte(dev->pdev, I915_GDRST, 0); 1477 pci_write_config_byte(pdev, I915_GDRST, 0);
1522 1478
1523 return wait_for(i915_reset_complete(dev), 500); 1479 return wait_for(i915_reset_complete(pdev), 500);
1524} 1480}
1525 1481
1526static int g4x_reset_complete(struct drm_device *dev) 1482static int g4x_reset_complete(struct pci_dev *pdev)
1527{ 1483{
1528 u8 gdrst; 1484 u8 gdrst;
1529 pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst); 1485 pci_read_config_byte(pdev, I915_GDRST, &gdrst);
1530 return (gdrst & GRDOM_RESET_ENABLE) == 0; 1486 return (gdrst & GRDOM_RESET_ENABLE) == 0;
1531} 1487}
1532 1488
1533static int g33_do_reset(struct drm_device *dev, unsigned engine_mask) 1489static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1534{ 1490{
1535 pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE); 1491 struct pci_dev *pdev = dev_priv->dev->pdev;
1536 return wait_for(g4x_reset_complete(dev), 500); 1492 pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
1493 return wait_for(g4x_reset_complete(pdev), 500);
1537} 1494}
1538 1495
1539static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask) 1496static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1540{ 1497{
1541 struct drm_i915_private *dev_priv = dev->dev_private; 1498 struct pci_dev *pdev = dev_priv->dev->pdev;
1542 int ret; 1499 int ret;
1543 1500
1544 pci_write_config_byte(dev->pdev, I915_GDRST, 1501 pci_write_config_byte(pdev, I915_GDRST,
1545 GRDOM_RENDER | GRDOM_RESET_ENABLE); 1502 GRDOM_RENDER | GRDOM_RESET_ENABLE);
1546 ret = wait_for(g4x_reset_complete(dev), 500); 1503 ret = wait_for(g4x_reset_complete(pdev), 500);
1547 if (ret) 1504 if (ret)
1548 return ret; 1505 return ret;
1549 1506
@@ -1551,9 +1508,9 @@ static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask)
1551 I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE); 1508 I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
1552 POSTING_READ(VDECCLK_GATE_D); 1509 POSTING_READ(VDECCLK_GATE_D);
1553 1510
1554 pci_write_config_byte(dev->pdev, I915_GDRST, 1511 pci_write_config_byte(pdev, I915_GDRST,
1555 GRDOM_MEDIA | GRDOM_RESET_ENABLE); 1512 GRDOM_MEDIA | GRDOM_RESET_ENABLE);
1556 ret = wait_for(g4x_reset_complete(dev), 500); 1513 ret = wait_for(g4x_reset_complete(pdev), 500);
1557 if (ret) 1514 if (ret)
1558 return ret; 1515 return ret;
1559 1516
@@ -1561,14 +1518,14 @@ static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask)
1561 I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE); 1518 I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
1562 POSTING_READ(VDECCLK_GATE_D); 1519 POSTING_READ(VDECCLK_GATE_D);
1563 1520
1564 pci_write_config_byte(dev->pdev, I915_GDRST, 0); 1521 pci_write_config_byte(pdev, I915_GDRST, 0);
1565 1522
1566 return 0; 1523 return 0;
1567} 1524}
1568 1525
1569static int ironlake_do_reset(struct drm_device *dev, unsigned engine_mask) 1526static int ironlake_do_reset(struct drm_i915_private *dev_priv,
1527 unsigned engine_mask)
1570{ 1528{
1571 struct drm_i915_private *dev_priv = dev->dev_private;
1572 int ret; 1529 int ret;
1573 1530
1574 I915_WRITE(ILK_GDSR, 1531 I915_WRITE(ILK_GDSR,
@@ -1612,7 +1569,7 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
1612 1569
1613/** 1570/**
1614 * gen6_reset_engines - reset individual engines 1571 * gen6_reset_engines - reset individual engines
1615 * @dev: DRM device 1572 * @dev_priv: i915 device
1616 * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset 1573 * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
1617 * 1574 *
1618 * This function will reset the individual engines that are set in engine_mask. 1575 * This function will reset the individual engines that are set in engine_mask.
@@ -1623,9 +1580,9 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
1623 * 1580 *
1624 * Returns 0 on success, nonzero on error. 1581 * Returns 0 on success, nonzero on error.
1625 */ 1582 */
1626static int gen6_reset_engines(struct drm_device *dev, unsigned engine_mask) 1583static int gen6_reset_engines(struct drm_i915_private *dev_priv,
1584 unsigned engine_mask)
1627{ 1585{
1628 struct drm_i915_private *dev_priv = dev->dev_private;
1629 struct intel_engine_cs *engine; 1586 struct intel_engine_cs *engine;
1630 const u32 hw_engine_mask[I915_NUM_ENGINES] = { 1587 const u32 hw_engine_mask[I915_NUM_ENGINES] = {
1631 [RCS] = GEN6_GRDOM_RENDER, 1588 [RCS] = GEN6_GRDOM_RENDER,
@@ -1647,7 +1604,7 @@ static int gen6_reset_engines(struct drm_device *dev, unsigned engine_mask)
1647 1604
1648 ret = gen6_hw_domain_reset(dev_priv, hw_mask); 1605 ret = gen6_hw_domain_reset(dev_priv, hw_mask);
1649 1606
1650 intel_uncore_forcewake_reset(dev, true); 1607 intel_uncore_forcewake_reset(dev_priv, true);
1651 1608
1652 return ret; 1609 return ret;
1653} 1610}
@@ -1663,8 +1620,8 @@ static int wait_for_register_fw(struct drm_i915_private *dev_priv,
1663 1620
1664static int gen8_request_engine_reset(struct intel_engine_cs *engine) 1621static int gen8_request_engine_reset(struct intel_engine_cs *engine)
1665{ 1622{
1623 struct drm_i915_private *dev_priv = engine->i915;
1666 int ret; 1624 int ret;
1667 struct drm_i915_private *dev_priv = engine->dev->dev_private;
1668 1625
1669 I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base), 1626 I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
1670 _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET)); 1627 _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
@@ -1682,22 +1639,22 @@ static int gen8_request_engine_reset(struct intel_engine_cs *engine)
1682 1639
1683static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine) 1640static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine)
1684{ 1641{
1685 struct drm_i915_private *dev_priv = engine->dev->dev_private; 1642 struct drm_i915_private *dev_priv = engine->i915;
1686 1643
1687 I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base), 1644 I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
1688 _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET)); 1645 _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
1689} 1646}
1690 1647
1691static int gen8_reset_engines(struct drm_device *dev, unsigned engine_mask) 1648static int gen8_reset_engines(struct drm_i915_private *dev_priv,
1649 unsigned engine_mask)
1692{ 1650{
1693 struct drm_i915_private *dev_priv = dev->dev_private;
1694 struct intel_engine_cs *engine; 1651 struct intel_engine_cs *engine;
1695 1652
1696 for_each_engine_masked(engine, dev_priv, engine_mask) 1653 for_each_engine_masked(engine, dev_priv, engine_mask)
1697 if (gen8_request_engine_reset(engine)) 1654 if (gen8_request_engine_reset(engine))
1698 goto not_ready; 1655 goto not_ready;
1699 1656
1700 return gen6_reset_engines(dev, engine_mask); 1657 return gen6_reset_engines(dev_priv, engine_mask);
1701 1658
1702not_ready: 1659not_ready:
1703 for_each_engine_masked(engine, dev_priv, engine_mask) 1660 for_each_engine_masked(engine, dev_priv, engine_mask)
@@ -1706,35 +1663,35 @@ not_ready:
1706 return -EIO; 1663 return -EIO;
1707} 1664}
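The hunk header above elides the ready handshake inside gen8_request_engine_reset(). For readers tracing the request/rollback pattern, the elided poll looks roughly like the following hedged reconstruction; the RESET_CTL_READY_TO_RESET bit and the 700 ms timeout are assumptions, not taken from this excerpt.

    /* Hedged sketch: after setting RESET_CTL_REQUEST_RESET, poll the
     * same register until the engine reports it is ready to be reset. */
    ret = wait_for_register_fw(dev_priv,
                               RING_RESET_CTL(engine->mmio_base),
                               RESET_CTL_READY_TO_RESET,
                               RESET_CTL_READY_TO_RESET,
                               700);
    if (ret)
            DRM_ERROR("Request to reset %s timed out\n", engine->name);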
1708 1665
1709static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *, 1666typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);
1710 unsigned engine_mask) 1667
1668static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
1711{ 1669{
1712 if (!i915.reset) 1670 if (!i915.reset)
1713 return NULL; 1671 return NULL;
1714 1672
1715 if (INTEL_INFO(dev)->gen >= 8) 1673 if (INTEL_INFO(dev_priv)->gen >= 8)
1716 return gen8_reset_engines; 1674 return gen8_reset_engines;
1717 else if (INTEL_INFO(dev)->gen >= 6) 1675 else if (INTEL_INFO(dev_priv)->gen >= 6)
1718 return gen6_reset_engines; 1676 return gen6_reset_engines;
1719 else if (IS_GEN5(dev)) 1677 else if (IS_GEN5(dev_priv))
1720 return ironlake_do_reset; 1678 return ironlake_do_reset;
1721 else if (IS_G4X(dev)) 1679 else if (IS_G4X(dev_priv))
1722 return g4x_do_reset; 1680 return g4x_do_reset;
1723 else if (IS_G33(dev)) 1681 else if (IS_G33(dev_priv))
1724 return g33_do_reset; 1682 return g33_do_reset;
1725 else if (INTEL_INFO(dev)->gen >= 3) 1683 else if (INTEL_INFO(dev_priv)->gen >= 3)
1726 return i915_do_reset; 1684 return i915_do_reset;
1727 else 1685 else
1728 return NULL; 1686 return NULL;
1729} 1687}
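The typedef is purely a readability change; both declarations below name the same pointer type. A hedged side-by-side, not from the patch:

    /* New spelling, via the reset_func typedef introduced above: */
    reset_func reset = intel_get_gpu_reset(dev_priv);

    /* Old spelling: a function returning a pointer to a function. */
    int (*old_reset)(struct drm_i915_private *, unsigned) =
            intel_get_gpu_reset(dev_priv);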
1730 1688
1731int intel_gpu_reset(struct drm_device *dev, unsigned engine_mask) 1689int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1732{ 1690{
1733 struct drm_i915_private *dev_priv = to_i915(dev); 1691 reset_func reset;
1734 int (*reset)(struct drm_device *, unsigned);
1735 int ret; 1692 int ret;
1736 1693
1737 reset = intel_get_gpu_reset(dev); 1694 reset = intel_get_gpu_reset(dev_priv);
1738 if (reset == NULL) 1695 if (reset == NULL)
1739 return -ENODEV; 1696 return -ENODEV;
1740 1697
@@ -1742,15 +1699,15 @@ int intel_gpu_reset(struct drm_device *dev, unsigned engine_mask)
1742 * request may be dropped and never completes (causing -EIO). 1699 * request may be dropped and never completes (causing -EIO).
1743 */ 1700 */
1744 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 1701 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1745 ret = reset(dev, engine_mask); 1702 ret = reset(dev_priv, engine_mask);
1746 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 1703 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1747 1704
1748 return ret; 1705 return ret;
1749} 1706}
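The forcewake bracket around the reset callback is the pattern to reuse whenever a register sequence must not race with RC6 entry. A hedged skeleton:

    intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
    /* ... I915_WRITE_FW()/I915_READ_FW() sequence that must run with
     * the hardware awake; the reset callback above is one example ... */
    intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);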
1750 1707
1751bool intel_has_gpu_reset(struct drm_device *dev) 1708bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
1752{ 1709{
1753 return intel_get_gpu_reset(dev) != NULL; 1710 return intel_get_gpu_reset(dev_priv) != NULL;
1754} 1711}
1755 1712
1756int intel_guc_reset(struct drm_i915_private *dev_priv) 1713int intel_guc_reset(struct drm_i915_private *dev_priv)
@@ -1758,7 +1715,7 @@ int intel_guc_reset(struct drm_i915_private *dev_priv)
1758 int ret; 1715 int ret;
1759 unsigned long irqflags; 1716 unsigned long irqflags;
1760 1717
1761 if (!i915.enable_guc_submission) 1718 if (!HAS_GUC(dev_priv))
1762 return -EINVAL; 1719 return -EINVAL;
1763 1720
1764 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 1721 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -1802,10 +1759,10 @@ intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
1802{ 1759{
1803 enum forcewake_domains fw_domains; 1760 enum forcewake_domains fw_domains;
1804 1761
1805 if (intel_vgpu_active(dev_priv->dev)) 1762 if (intel_vgpu_active(dev_priv))
1806 return 0; 1763 return 0;
1807 1764
1808 switch (INTEL_INFO(dev_priv)->gen) { 1765 switch (INTEL_GEN(dev_priv)) {
1809 case 9: 1766 case 9:
1810 fw_domains = __gen9_reg_read_fw_domains(i915_mmio_reg_offset(reg)); 1767 fw_domains = __gen9_reg_read_fw_domains(i915_mmio_reg_offset(reg));
1811 break; 1768 break;
@@ -1842,10 +1799,10 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
1842{ 1799{
1843 enum forcewake_domains fw_domains; 1800 enum forcewake_domains fw_domains;
1844 1801
1845 if (intel_vgpu_active(dev_priv->dev)) 1802 if (intel_vgpu_active(dev_priv))
1846 return 0; 1803 return 0;
1847 1804
1848 switch (INTEL_INFO(dev_priv)->gen) { 1805 switch (INTEL_GEN(dev_priv)) {
1849 case 9: 1806 case 9:
1850 fw_domains = __gen9_reg_write_fw_domains(i915_mmio_reg_offset(reg)); 1807 fw_domains = __gen9_reg_write_fw_domains(i915_mmio_reg_offset(reg));
1851 break; 1808 break;
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index c15051de8023..68db9621f1f0 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -403,9 +403,10 @@ struct lvds_dvo_timing {
403 u8 vsync_off:4; 403 u8 vsync_off:4;
404 u8 rsvd0:6; 404 u8 rsvd0:6;
405 u8 hsync_off_hi:2; 405 u8 hsync_off_hi:2;
406 u8 h_image; 406 u8 himage_lo;
407 u8 v_image; 407 u8 vimage_lo;
408 u8 max_hv; 408 u8 vimage_hi:4;
409 u8 himage_hi:4;
409 u8 h_border; 410 u8 h_border;
410 u8 v_border; 411 u8 v_border;
411 u8 rsvd1:3; 412 u8 rsvd1:3;
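With the corrected VBT layout, each 12-bit image size spans a low byte plus a high nibble. A hedged sketch of the recombination; the pointer t (a struct lvds_dvo_timing *) is hypothetical:

    /* Hedged, not from the patch: reassemble the 12-bit DVO timing fields. */
    u32 h_image = ((u32)t->himage_hi << 8) | t->himage_lo;
    u32 v_image = ((u32)t->vimage_hi << 8) | t->vimage_lo;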
@@ -446,10 +447,16 @@ struct bdb_lfp_backlight_data_entry {
446 u8 obsolete3; 447 u8 obsolete3;
447} __packed; 448} __packed;
448 449
450struct bdb_lfp_backlight_control_method {
451 u8 type:4;
452 u8 controller:4;
453} __packed;
454
449struct bdb_lfp_backlight_data { 455struct bdb_lfp_backlight_data {
450 u8 entry_size; 456 u8 entry_size;
451 struct bdb_lfp_backlight_data_entry data[16]; 457 struct bdb_lfp_backlight_data_entry data[16];
452 u8 level[16]; 458 u8 level[16];
459 struct bdb_lfp_backlight_control_method backlight_control[16];
453} __packed; 460} __packed;
454 461
455struct aimdb_header { 462struct aimdb_header {
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 1f14b602882b..7746418a4c08 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -97,8 +97,8 @@ static struct imx_drm_crtc *imx_drm_find_crtc(struct drm_crtc *crtc)
97 return NULL; 97 return NULL;
98} 98}
99 99
100int imx_drm_set_bus_format_pins(struct drm_encoder *encoder, u32 bus_format, 100int imx_drm_set_bus_config(struct drm_encoder *encoder, u32 bus_format,
101 int hsync_pin, int vsync_pin) 101 int hsync_pin, int vsync_pin, u32 bus_flags)
102{ 102{
103 struct imx_drm_crtc_helper_funcs *helper; 103 struct imx_drm_crtc_helper_funcs *helper;
104 struct imx_drm_crtc *imx_crtc; 104 struct imx_drm_crtc *imx_crtc;
@@ -110,14 +110,17 @@ int imx_drm_set_bus_format_pins(struct drm_encoder *encoder, u32 bus_format,
110 helper = &imx_crtc->imx_drm_helper_funcs; 110 helper = &imx_crtc->imx_drm_helper_funcs;
111 if (helper->set_interface_pix_fmt) 111 if (helper->set_interface_pix_fmt)
112 return helper->set_interface_pix_fmt(encoder->crtc, 112 return helper->set_interface_pix_fmt(encoder->crtc,
113 bus_format, hsync_pin, vsync_pin); 113 bus_format, hsync_pin, vsync_pin,
114 bus_flags);
114 return 0; 115 return 0;
115} 116}
116EXPORT_SYMBOL_GPL(imx_drm_set_bus_format_pins); 117EXPORT_SYMBOL_GPL(imx_drm_set_bus_config);
117 118
118int imx_drm_set_bus_format(struct drm_encoder *encoder, u32 bus_format) 119int imx_drm_set_bus_format(struct drm_encoder *encoder, u32 bus_format)
119{ 120{
120 return imx_drm_set_bus_format_pins(encoder, bus_format, 2, 3); 121 return imx_drm_set_bus_config(encoder, bus_format, 2, 3,
122 DRM_BUS_FLAG_DE_HIGH |
123 DRM_BUS_FLAG_PIXDATA_NEGEDGE);
121} 124}
122EXPORT_SYMBOL_GPL(imx_drm_set_bus_format); 125EXPORT_SYMBOL_GPL(imx_drm_set_bus_format);
123 126
@@ -404,7 +407,6 @@ static struct drm_driver imx_drm_driver = {
404 .load = imx_drm_driver_load, 407 .load = imx_drm_driver_load,
405 .unload = imx_drm_driver_unload, 408 .unload = imx_drm_driver_unload,
406 .lastclose = imx_drm_driver_lastclose, 409 .lastclose = imx_drm_driver_lastclose,
407 .set_busid = drm_platform_set_busid,
408 .gem_free_object_unlocked = drm_gem_cma_free_object, 410 .gem_free_object_unlocked = drm_gem_cma_free_object,
409 .gem_vm_ops = &drm_gem_cma_vm_ops, 411 .gem_vm_ops = &drm_gem_cma_vm_ops,
410 .dumb_create = drm_gem_cma_dumb_create, 412 .dumb_create = drm_gem_cma_dumb_create,
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index b0241b9d1334..74320a1723b7 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
@@ -19,7 +19,8 @@ struct imx_drm_crtc_helper_funcs {
19 int (*enable_vblank)(struct drm_crtc *crtc); 19 int (*enable_vblank)(struct drm_crtc *crtc);
20 void (*disable_vblank)(struct drm_crtc *crtc); 20 void (*disable_vblank)(struct drm_crtc *crtc);
21 int (*set_interface_pix_fmt)(struct drm_crtc *crtc, 21 int (*set_interface_pix_fmt)(struct drm_crtc *crtc,
22 u32 bus_format, int hsync_pin, int vsync_pin); 22 u32 bus_format, int hsync_pin, int vsync_pin,
23 u32 bus_flags);
23 const struct drm_crtc_helper_funcs *crtc_helper_funcs; 24 const struct drm_crtc_helper_funcs *crtc_helper_funcs;
24 const struct drm_crtc_funcs *crtc_funcs; 25 const struct drm_crtc_funcs *crtc_funcs;
25}; 26};
@@ -41,8 +42,8 @@ void imx_drm_mode_config_init(struct drm_device *drm);
41 42
42struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb); 43struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb);
43 44
44int imx_drm_set_bus_format_pins(struct drm_encoder *encoder, 45int imx_drm_set_bus_config(struct drm_encoder *encoder, u32 bus_format,
45 u32 bus_format, int hsync_pin, int vsync_pin); 46 int hsync_pin, int vsync_pin, u32 bus_flags);
46int imx_drm_set_bus_format(struct drm_encoder *encoder, 47int imx_drm_set_bus_format(struct drm_encoder *encoder,
47 u32 bus_format); 48 u32 bus_format);
48 49
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index a58eee59550a..beff793bb717 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -25,6 +25,7 @@
25#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> 25#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
26#include <linux/of_device.h> 26#include <linux/of_device.h>
27#include <linux/of_graph.h> 27#include <linux/of_graph.h>
28#include <video/of_display_timing.h>
28#include <video/of_videomode.h> 29#include <video/of_videomode.h>
29#include <linux/regmap.h> 30#include <linux/regmap.h>
30#include <linux/videodev2.h> 31#include <linux/videodev2.h>
@@ -59,6 +60,7 @@ struct imx_ldb_channel {
59 struct drm_encoder encoder; 60 struct drm_encoder encoder;
60 struct drm_panel *panel; 61 struct drm_panel *panel;
61 struct device_node *child; 62 struct device_node *child;
63 struct i2c_adapter *ddc;
62 int chno; 64 int chno;
63 void *edid; 65 void *edid;
64 int edid_len; 66 int edid_len;
@@ -107,6 +109,9 @@ static int imx_ldb_connector_get_modes(struct drm_connector *connector)
107 return num_modes; 109 return num_modes;
108 } 110 }
109 111
112 if (!imx_ldb_ch->edid && imx_ldb_ch->ddc)
113 imx_ldb_ch->edid = drm_get_edid(connector, imx_ldb_ch->ddc);
114
110 if (imx_ldb_ch->edid) { 115 if (imx_ldb_ch->edid) {
111 drm_mode_connector_update_edid_property(connector, 116 drm_mode_connector_update_edid_property(connector,
112 imx_ldb_ch->edid); 117 imx_ldb_ch->edid);
@@ -553,7 +558,8 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
553 558
554 for_each_child_of_node(np, child) { 559 for_each_child_of_node(np, child) {
555 struct imx_ldb_channel *channel; 560 struct imx_ldb_channel *channel;
556 struct device_node *port; 561 struct device_node *ddc_node;
562 struct device_node *ep;
557 563
558 ret = of_property_read_u32(child, "reg", &i); 564 ret = of_property_read_u32(child, "reg", &i);
559 if (ret || i < 0 || i > 1) 565 if (ret || i < 0 || i > 1)
@@ -576,33 +582,54 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
576 * The output port is port@4 with an external 4-port mux or 582 * The output port is port@4 with an external 4-port mux or
577 * port@2 with the internal 2-port mux. 583 * port@2 with the internal 2-port mux.
578 */ 584 */
579 port = of_graph_get_port_by_id(child, imx_ldb->lvds_mux ? 4 : 2); 585 ep = of_graph_get_endpoint_by_regs(child,
580 if (port) { 586 imx_ldb->lvds_mux ? 4 : 2,
581 struct device_node *endpoint, *remote; 587 -1);
582 588 if (ep) {
583 endpoint = of_get_child_by_name(port, "endpoint"); 589 struct device_node *remote;
584 if (endpoint) { 590
585 remote = of_graph_get_remote_port_parent(endpoint); 591 remote = of_graph_get_remote_port_parent(ep);
586 if (remote) 592 of_node_put(ep);
587 channel->panel = of_drm_find_panel(remote); 593 if (remote)
588 else 594 channel->panel = of_drm_find_panel(remote);
589 return -EPROBE_DEFER; 595 else
590 if (!channel->panel) { 596 return -EPROBE_DEFER;
591 dev_err(dev, "panel not found: %s\n", 597 of_node_put(remote);
592 remote->full_name); 598 if (!channel->panel) {
593 return -EPROBE_DEFER; 599 dev_err(dev, "panel not found: %s\n",
594 } 600 remote->full_name);
601 return -EPROBE_DEFER;
595 } 602 }
596 } 603 }
597 604
598 edidp = of_get_property(child, "edid", &channel->edid_len); 605 ddc_node = of_parse_phandle(child, "ddc-i2c-bus", 0);
599 if (edidp) { 606 if (ddc_node) {
600 channel->edid = kmemdup(edidp, channel->edid_len, 607 channel->ddc = of_find_i2c_adapter_by_node(ddc_node);
601 GFP_KERNEL); 608 of_node_put(ddc_node);
602 } else if (!channel->panel) { 609 if (!channel->ddc) {
603 ret = of_get_drm_display_mode(child, &channel->mode, 0); 610 dev_warn(dev, "failed to get ddc i2c adapter\n");
604 if (!ret) 611 return -EPROBE_DEFER;
605 channel->mode_valid = 1; 612 }
613 }
614
615 if (!channel->ddc) {
616 /* if no DDC available, fall back to hardcoded EDID */
617 dev_dbg(dev, "no ddc available\n");
618
619 edidp = of_get_property(child, "edid",
620 &channel->edid_len);
621 if (edidp) {
622 channel->edid = kmemdup(edidp,
623 channel->edid_len,
624 GFP_KERNEL);
625 } else if (!channel->panel) {
626 /* fall back to the display-timings node */
627 ret = of_get_drm_display_mode(child,
628 &channel->mode,
629 OF_USE_NATIVE_MODE);
630 if (!ret)
631 channel->mode_valid = 1;
632 }
606 } 633 }
607 634
608 channel->bus_format = of_get_bus_format(dev, child); 635 channel->bus_format = of_get_bus_format(dev, child);
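Taken together, this hunk gives each LVDS channel a descending mode-source priority: OF-graph panel, EDID read over the optional ddc-i2c-bus adapter, hardcoded edid property blob, and finally the display-timings node. A hedged condensation of the resulting get_modes behaviour; add_edid_modes() and add_fixed_mode() are hypothetical helper names standing in for the connector plumbing:

    /* Hedged sketch, not from the patch: one channel's mode sources. */
    if (channel->panel)
            num_modes = drm_panel_get_modes(channel->panel);       /* 1: panel  */
    else if (channel->edid || channel->ddc)
            num_modes = add_edid_modes(connector, channel);        /* 2,3: EDID */
    else if (channel->mode_valid)
            num_modes = add_fixed_mode(connector, &channel->mode); /* 4: timings */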
@@ -647,6 +674,7 @@ static void imx_ldb_unbind(struct device *dev, struct device *master,
647 channel->encoder.funcs->destroy(&channel->encoder); 674 channel->encoder.funcs->destroy(&channel->encoder);
648 675
649 kfree(channel->edid); 676 kfree(channel->edid);
677 i2c_put_adapter(channel->ddc);
650 } 678 }
651} 679}
652 680
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index ae7a9fb3b8a2..baf788121287 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -294,8 +294,10 @@ static void imx_tve_encoder_prepare(struct drm_encoder *encoder)
294 294
295 switch (tve->mode) { 295 switch (tve->mode) {
296 case TVE_MODE_VGA: 296 case TVE_MODE_VGA:
297 imx_drm_set_bus_format_pins(encoder, MEDIA_BUS_FMT_GBR888_1X24, 297 imx_drm_set_bus_config(encoder, MEDIA_BUS_FMT_GBR888_1X24,
298 tve->hsync_pin, tve->vsync_pin); 298 tve->hsync_pin, tve->vsync_pin,
299 DRM_BUS_FLAG_DE_HIGH |
300 DRM_BUS_FLAG_PIXDATA_NEGEDGE);
299 break; 301 break;
300 case TVE_MODE_TVOUT: 302 case TVE_MODE_TVOUT:
301 imx_drm_set_bus_format(encoder, MEDIA_BUS_FMT_YUV8_1X24); 303 imx_drm_set_bus_format(encoder, MEDIA_BUS_FMT_YUV8_1X24);
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index b2c30b8d9816..fc040417e1e8 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -66,6 +66,7 @@ struct ipu_crtc {
66 struct ipu_flip_work *flip_work; 66 struct ipu_flip_work *flip_work;
67 int irq; 67 int irq;
68 u32 bus_format; 68 u32 bus_format;
69 u32 bus_flags;
69 int di_hsync_pin; 70 int di_hsync_pin;
70 int di_vsync_pin; 71 int di_vsync_pin;
71}; 72};
@@ -271,8 +272,10 @@ static int ipu_crtc_mode_set(struct drm_crtc *crtc,
271 else 272 else
272 sig_cfg.clkflags = 0; 273 sig_cfg.clkflags = 0;
273 274
274 sig_cfg.enable_pol = 1; 275 sig_cfg.enable_pol = !(ipu_crtc->bus_flags & DRM_BUS_FLAG_DE_LOW);
275 sig_cfg.clk_pol = 0; 276 /* Default to driving pixel data on negative clock edges */
277 sig_cfg.clk_pol = !!(ipu_crtc->bus_flags &
278 DRM_BUS_FLAG_PIXDATA_POSEDGE);
276 sig_cfg.bus_format = ipu_crtc->bus_format; 279 sig_cfg.bus_format = ipu_crtc->bus_format;
277 sig_cfg.v_to_h_sync = 0; 280 sig_cfg.v_to_h_sync = 0;
278 sig_cfg.hsync_pin = ipu_crtc->di_hsync_pin; 281 sig_cfg.hsync_pin = ipu_crtc->di_hsync_pin;
@@ -396,11 +399,12 @@ static void ipu_disable_vblank(struct drm_crtc *crtc)
396} 399}
397 400
398static int ipu_set_interface_pix_fmt(struct drm_crtc *crtc, 401static int ipu_set_interface_pix_fmt(struct drm_crtc *crtc,
399 u32 bus_format, int hsync_pin, int vsync_pin) 402 u32 bus_format, int hsync_pin, int vsync_pin, u32 bus_flags)
400{ 403{
401 struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); 404 struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
402 405
403 ipu_crtc->bus_format = bus_format; 406 ipu_crtc->bus_format = bus_format;
407 ipu_crtc->bus_flags = bus_flags;
404 ipu_crtc->di_hsync_pin = hsync_pin; 408 ipu_crtc->di_hsync_pin = hsync_pin;
405 ipu_crtc->di_vsync_pin = vsync_pin; 409 ipu_crtc->di_vsync_pin = vsync_pin;
406 410
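A hedged cross-check, not in the patch: the defaults that imx_drm_set_bus_format() now passes (see the imx-drm-core.c hunk earlier) evaluate to exactly the values ipu_crtc_mode_set() used to hard-code, so legacy callers see no behaviour change:

    u32 flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_NEGEDGE;
    int enable_pol = !(flags & DRM_BUS_FLAG_DE_LOW);        /* == 1, as before */
    int clk_pol = !!(flags & DRM_BUS_FLAG_PIXDATA_POSEDGE); /* == 0, as before */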
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 681ec6eb77d9..a4bb44118d33 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -38,6 +38,8 @@ static const uint32_t ipu_plane_formats[] = {
38 DRM_FORMAT_RGBX8888, 38 DRM_FORMAT_RGBX8888,
39 DRM_FORMAT_BGRA8888, 39 DRM_FORMAT_BGRA8888,
40 DRM_FORMAT_BGRA8888, 40 DRM_FORMAT_BGRA8888,
41 DRM_FORMAT_UYVY,
42 DRM_FORMAT_VYUY,
41 DRM_FORMAT_YUYV, 43 DRM_FORMAT_YUYV,
42 DRM_FORMAT_YVYU, 44 DRM_FORMAT_YVYU,
43 DRM_FORMAT_YUV420, 45 DRM_FORMAT_YUV420,
@@ -428,7 +430,6 @@ static int ipu_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
428 if (crtc != plane->crtc) 430 if (crtc != plane->crtc)
429 dev_dbg(plane->dev->dev, "crtc change: %p -> %p\n", 431 dev_dbg(plane->dev->dev, "crtc change: %p -> %p\n",
430 plane->crtc, crtc); 432 plane->crtc, crtc);
431 plane->crtc = crtc;
432 433
433 if (!ipu_plane->enabled) 434 if (!ipu_plane->enabled)
434 ipu_plane_enable(ipu_plane); 435 ipu_plane_enable(ipu_plane);
@@ -461,7 +462,7 @@ static void ipu_plane_destroy(struct drm_plane *plane)
461 kfree(ipu_plane); 462 kfree(ipu_plane);
462} 463}
463 464
464static struct drm_plane_funcs ipu_plane_funcs = { 465static const struct drm_plane_funcs ipu_plane_funcs = {
465 .update_plane = ipu_update_plane, 466 .update_plane = ipu_update_plane,
466 .disable_plane = ipu_disable_plane, 467 .disable_plane = ipu_disable_plane,
467 .destroy = ipu_plane_destroy, 468 .destroy = ipu_plane_destroy,
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 363e2c7741e2..2d1fd02cd3d6 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -35,7 +35,6 @@ struct imx_parallel_display {
35 void *edid; 35 void *edid;
36 int edid_len; 36 int edid_len;
37 u32 bus_format; 37 u32 bus_format;
38 int mode_valid;
39 struct drm_display_mode mode; 38 struct drm_display_mode mode;
40 struct drm_panel *panel; 39 struct drm_panel *panel;
41}; 40};
@@ -68,17 +67,6 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
68 num_modes = drm_add_edid_modes(connector, imxpd->edid); 67 num_modes = drm_add_edid_modes(connector, imxpd->edid);
69 } 68 }
70 69
71 if (imxpd->mode_valid) {
72 struct drm_display_mode *mode = drm_mode_create(connector->dev);
73
74 if (!mode)
75 return -EINVAL;
76 drm_mode_copy(mode, &imxpd->mode);
77 mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
78 drm_mode_probed_add(connector, mode);
79 num_modes++;
80 }
81
82 if (np) { 70 if (np) {
83 struct drm_display_mode *mode = drm_mode_create(connector->dev); 71 struct drm_display_mode *mode = drm_mode_create(connector->dev);
84 72
@@ -115,8 +103,8 @@ static void imx_pd_encoder_dpms(struct drm_encoder *encoder, int mode)
115static void imx_pd_encoder_prepare(struct drm_encoder *encoder) 103static void imx_pd_encoder_prepare(struct drm_encoder *encoder)
116{ 104{
117 struct imx_parallel_display *imxpd = enc_to_imxpd(encoder); 105 struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
118 106 imx_drm_set_bus_config(encoder, imxpd->bus_format, 2, 3,
119 imx_drm_set_bus_format(encoder, imxpd->bus_format); 107 imxpd->connector.display_info.bus_flags);
120} 108}
121 109
122static void imx_pd_encoder_commit(struct drm_encoder *encoder) 110static void imx_pd_encoder_commit(struct drm_encoder *encoder)
@@ -203,7 +191,7 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
203{ 191{
204 struct drm_device *drm = data; 192 struct drm_device *drm = data;
205 struct device_node *np = dev->of_node; 193 struct device_node *np = dev->of_node;
206 struct device_node *port; 194 struct device_node *ep;
207 const u8 *edidp; 195 const u8 *edidp;
208 struct imx_parallel_display *imxpd; 196 struct imx_parallel_display *imxpd;
209 int ret; 197 int ret;
@@ -230,18 +218,18 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
230 } 218 }
231 219
232 /* port@1 is the output port */ 220 /* port@1 is the output port */
233 port = of_graph_get_port_by_id(np, 1); 221 ep = of_graph_get_endpoint_by_regs(np, 1, -1);
234 if (port) { 222 if (ep) {
235 struct device_node *endpoint, *remote; 223 struct device_node *remote;
236 224
237 endpoint = of_get_child_by_name(port, "endpoint"); 225 remote = of_graph_get_remote_port_parent(ep);
238 if (endpoint) { 226 of_node_put(ep);
239 remote = of_graph_get_remote_port_parent(endpoint); 227 if (remote) {
240 if (remote) 228 imxpd->panel = of_drm_find_panel(remote);
241 imxpd->panel = of_drm_find_panel(remote); 229 of_node_put(remote);
242 if (!imxpd->panel)
243 return -EPROBE_DEFER;
244 } 230 }
231 if (!imxpd->panel)
232 return -EPROBE_DEFER;
245 } 233 }
246 234
247 imxpd->dev = dev; 235 imxpd->dev = dev;
diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig
index eeefc971801a..23ac8041c562 100644
--- a/drivers/gpu/drm/mediatek/Kconfig
+++ b/drivers/gpu/drm/mediatek/Kconfig
@@ -6,7 +6,6 @@ config DRM_MEDIATEK
6 select DRM_KMS_HELPER 6 select DRM_KMS_HELPER
7 select DRM_MIPI_DSI 7 select DRM_MIPI_DSI
8 select DRM_PANEL 8 select DRM_PANEL
9 select IOMMU_DMA
10 select MEMORY 9 select MEMORY
11 select MTK_SMI 10 select MTK_SMI
12 help 11 help
@@ -14,3 +13,11 @@ config DRM_MEDIATEK
14 The module will be called mediatek-drm 13 The module will be called mediatek-drm
15 This driver provides kernel mode setting and 14 This driver provides kernel mode setting and
16 buffer management to userspace. 15 buffer management to userspace.
16
17config DRM_MEDIATEK_HDMI
18 tristate "DRM HDMI Support for Mediatek SoCs"
19 depends on DRM_MEDIATEK
20 select SND_SOC_HDMI_CODEC if SND_SOC
21 select GENERIC_PHY
22 help
23 DRM/KMS HDMI driver for Mediatek SoCs
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile
index 5fcf58e87786..bf2e5be1ab30 100644
--- a/drivers/gpu/drm/mediatek/Makefile
+++ b/drivers/gpu/drm/mediatek/Makefile
@@ -12,3 +12,10 @@ mediatek-drm-y := mtk_disp_ovl.o \
12 mtk_dpi.o 12 mtk_dpi.o
13 13
14obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o 14obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o
15
16mediatek-drm-hdmi-objs := mtk_cec.o \
17 mtk_hdmi.o \
18 mtk_hdmi_ddc.o \
19 mtk_mt8173_hdmi_phy.o
20
21obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mediatek-drm-hdmi.o
diff --git a/drivers/gpu/drm/mediatek/mtk_cec.c b/drivers/gpu/drm/mediatek/mtk_cec.c
new file mode 100644
index 000000000000..7a3eb8c17ef9
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_cec.c
@@ -0,0 +1,265 @@
1/*
2 * Copyright (c) 2014 MediaTek Inc.
3 * Author: Jie Qiu <jie.qiu@mediatek.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14#include <linux/clk.h>
15#include <linux/delay.h>
16#include <linux/io.h>
17#include <linux/interrupt.h>
18#include <linux/platform_device.h>
19
20#include "mtk_cec.h"
21
22#define TR_CONFIG 0x00
23#define CLEAR_CEC_IRQ BIT(15)
24
25#define CEC_CKGEN 0x04
26#define CEC_32K_PDN BIT(19)
27#define PDN BIT(16)
28
29#define RX_EVENT 0x54
30#define HDMI_PORD BIT(25)
31#define HDMI_HTPLG BIT(24)
32#define HDMI_PORD_INT_EN BIT(9)
33#define HDMI_HTPLG_INT_EN BIT(8)
34
35#define RX_GEN_WD 0x58
36#define HDMI_PORD_INT_32K_STATUS BIT(26)
37#define RX_RISC_INT_32K_STATUS BIT(25)
38#define HDMI_HTPLG_INT_32K_STATUS BIT(24)
39#define HDMI_PORD_INT_32K_CLR BIT(18)
40#define RX_INT_32K_CLR BIT(17)
41#define HDMI_HTPLG_INT_32K_CLR BIT(16)
42#define HDMI_PORD_INT_32K_STA_MASK BIT(10)
43#define RX_RISC_INT_32K_STA_MASK BIT(9)
44#define HDMI_HTPLG_INT_32K_STA_MASK BIT(8)
45#define HDMI_PORD_INT_32K_EN BIT(2)
46#define RX_INT_32K_EN BIT(1)
47#define HDMI_HTPLG_INT_32K_EN BIT(0)
48
49#define NORMAL_INT_CTRL 0x5C
50#define HDMI_HTPLG_INT_STA BIT(0)
51#define HDMI_PORD_INT_STA BIT(1)
52#define HDMI_HTPLG_INT_CLR BIT(16)
53#define HDMI_PORD_INT_CLR BIT(17)
54#define HDMI_FULL_INT_CLR BIT(20)
55
56struct mtk_cec {
57 void __iomem *regs;
58 struct clk *clk;
59 int irq;
60 bool hpd;
61 void (*hpd_event)(bool hpd, struct device *dev);
62 struct device *hdmi_dev;
63 spinlock_t lock;
64};
65
66static void mtk_cec_clear_bits(struct mtk_cec *cec, unsigned int offset,
67 unsigned int bits)
68{
69 void __iomem *reg = cec->regs + offset;
70 u32 tmp;
71
72 tmp = readl(reg);
73 tmp &= ~bits;
74 writel(tmp, reg);
75}
76
77static void mtk_cec_set_bits(struct mtk_cec *cec, unsigned int offset,
78 unsigned int bits)
79{
80 void __iomem *reg = cec->regs + offset;
81 u32 tmp;
82
83 tmp = readl(reg);
84 tmp |= bits;
85 writel(tmp, reg);
86}
87
88static void mtk_cec_mask(struct mtk_cec *cec, unsigned int offset,
89 unsigned int val, unsigned int mask)
90{
91 u32 tmp = readl(cec->regs + offset) & ~mask;
92
93 tmp |= val & mask;
94 writel(tmp, cec->regs + offset);
95}
96
97void mtk_cec_set_hpd_event(struct device *dev,
98 void (*hpd_event)(bool hpd, struct device *dev),
99 struct device *hdmi_dev)
100{
101 struct mtk_cec *cec = dev_get_drvdata(dev);
102 unsigned long flags;
103
104 spin_lock_irqsave(&cec->lock, flags);
105 cec->hdmi_dev = hdmi_dev;
106 cec->hpd_event = hpd_event;
107 spin_unlock_irqrestore(&cec->lock, flags);
108}
109
110bool mtk_cec_hpd_high(struct device *dev)
111{
112 struct mtk_cec *cec = dev_get_drvdata(dev);
113 unsigned int status;
114
115 status = readl(cec->regs + RX_EVENT);
116
117 return (status & (HDMI_PORD | HDMI_HTPLG)) == (HDMI_PORD | HDMI_HTPLG);
118}
119
120static void mtk_cec_htplg_irq_init(struct mtk_cec *cec)
121{
122 mtk_cec_mask(cec, CEC_CKGEN, 0 | CEC_32K_PDN, PDN | CEC_32K_PDN);
123 mtk_cec_set_bits(cec, RX_GEN_WD, HDMI_PORD_INT_32K_CLR |
124 RX_INT_32K_CLR | HDMI_HTPLG_INT_32K_CLR);
125 mtk_cec_mask(cec, RX_GEN_WD, 0, HDMI_PORD_INT_32K_CLR | RX_INT_32K_CLR |
126 HDMI_HTPLG_INT_32K_CLR | HDMI_PORD_INT_32K_EN |
127 RX_INT_32K_EN | HDMI_HTPLG_INT_32K_EN);
128}
129
130static void mtk_cec_htplg_irq_enable(struct mtk_cec *cec)
131{
132 mtk_cec_set_bits(cec, RX_EVENT, HDMI_PORD_INT_EN | HDMI_HTPLG_INT_EN);
133}
134
135static void mtk_cec_htplg_irq_disable(struct mtk_cec *cec)
136{
137 mtk_cec_clear_bits(cec, RX_EVENT, HDMI_PORD_INT_EN | HDMI_HTPLG_INT_EN);
138}
139
140static void mtk_cec_clear_htplg_irq(struct mtk_cec *cec)
141{
142 mtk_cec_set_bits(cec, TR_CONFIG, CLEAR_CEC_IRQ);
143 mtk_cec_set_bits(cec, NORMAL_INT_CTRL, HDMI_HTPLG_INT_CLR |
144 HDMI_PORD_INT_CLR | HDMI_FULL_INT_CLR);
145 mtk_cec_set_bits(cec, RX_GEN_WD, HDMI_PORD_INT_32K_CLR |
146 RX_INT_32K_CLR | HDMI_HTPLG_INT_32K_CLR);
147 usleep_range(5, 10);
148 mtk_cec_clear_bits(cec, NORMAL_INT_CTRL, HDMI_HTPLG_INT_CLR |
149 HDMI_PORD_INT_CLR | HDMI_FULL_INT_CLR);
150 mtk_cec_clear_bits(cec, TR_CONFIG, CLEAR_CEC_IRQ);
151 mtk_cec_clear_bits(cec, RX_GEN_WD, HDMI_PORD_INT_32K_CLR |
152 RX_INT_32K_CLR | HDMI_HTPLG_INT_32K_CLR);
153}
154
155static void mtk_cec_hpd_event(struct mtk_cec *cec, bool hpd)
156{
157 void (*hpd_event)(bool hpd, struct device *dev);
158 struct device *hdmi_dev;
159 unsigned long flags;
160
161 spin_lock_irqsave(&cec->lock, flags);
162 hpd_event = cec->hpd_event;
163 hdmi_dev = cec->hdmi_dev;
164 spin_unlock_irqrestore(&cec->lock, flags);
165
166 if (hpd_event)
167 hpd_event(hpd, hdmi_dev);
168}
169
170static irqreturn_t mtk_cec_htplg_isr_thread(int irq, void *arg)
171{
172 struct device *dev = arg;
173 struct mtk_cec *cec = dev_get_drvdata(dev);
174 bool hpd;
175
176 mtk_cec_clear_htplg_irq(cec);
177 hpd = mtk_cec_hpd_high(dev);
178
179 if (cec->hpd != hpd) {
180 dev_dbg(dev, "hotplug event! old hpd = %d, new hpd = %d\n",
181 cec->hpd, hpd);
182 cec->hpd = hpd;
183 mtk_cec_hpd_event(cec, hpd);
184 }
185 return IRQ_HANDLED;
186}
187
188static int mtk_cec_probe(struct platform_device *pdev)
189{
190 struct device *dev = &pdev->dev;
191 struct mtk_cec *cec;
192 struct resource *res;
193 int ret;
194
195 cec = devm_kzalloc(dev, sizeof(*cec), GFP_KERNEL);
196 if (!cec)
197 return -ENOMEM;
198
199 platform_set_drvdata(pdev, cec);
200 spin_lock_init(&cec->lock);
201
202 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
203 cec->regs = devm_ioremap_resource(dev, res);
204 if (IS_ERR(cec->regs)) {
205 ret = PTR_ERR(cec->regs);
206 dev_err(dev, "Failed to ioremap cec: %d\n", ret);
207 return ret;
208 }
209
210 cec->clk = devm_clk_get(dev, NULL);
211 if (IS_ERR(cec->clk)) {
212 ret = PTR_ERR(cec->clk);
213 dev_err(dev, "Failed to get cec clock: %d\n", ret);
214 return ret;
215 }
216
217 cec->irq = platform_get_irq(pdev, 0);
218 if (cec->irq < 0) {
219 dev_err(dev, "Failed to get cec irq: %d\n", cec->irq);
220 return cec->irq;
221 }
222
223 ret = devm_request_threaded_irq(dev, cec->irq, NULL,
224 mtk_cec_htplg_isr_thread,
225 IRQF_SHARED | IRQF_TRIGGER_LOW |
226 IRQF_ONESHOT, "hdmi hpd", dev);
227 if (ret) {
228 dev_err(dev, "Failed to register cec irq: %d\n", ret);
229 return ret;
230 }
231
232 ret = clk_prepare_enable(cec->clk);
233 if (ret) {
234 dev_err(dev, "Failed to enable cec clock: %d\n", ret);
235 return ret;
236 }
237
238 mtk_cec_htplg_irq_init(cec);
239 mtk_cec_htplg_irq_enable(cec);
240
241 return 0;
242}
243
244static int mtk_cec_remove(struct platform_device *pdev)
245{
246 struct mtk_cec *cec = platform_get_drvdata(pdev);
247
248 mtk_cec_htplg_irq_disable(cec);
249 clk_disable_unprepare(cec->clk);
250 return 0;
251}
252
253static const struct of_device_id mtk_cec_of_ids[] = {
254 { .compatible = "mediatek,mt8173-cec", },
255 {}
256};
257
258struct platform_driver mtk_cec_driver = {
259 .probe = mtk_cec_probe,
260 .remove = mtk_cec_remove,
261 .driver = {
262 .name = "mediatek-cec",
263 .of_match_table = mtk_cec_of_ids,
264 },
265};
diff --git a/drivers/gpu/drm/mediatek/mtk_cec.h b/drivers/gpu/drm/mediatek/mtk_cec.h
new file mode 100644
index 000000000000..10057b7eabec
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_cec.h
@@ -0,0 +1,26 @@
1/*
2 * Copyright (c) 2014 MediaTek Inc.
3 * Author: Jie Qiu <jie.qiu@mediatek.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14#ifndef _MTK_CEC_H
15#define _MTK_CEC_H
16
17#include <linux/types.h>
18
19struct device;
20
21void mtk_cec_set_hpd_event(struct device *dev,
22 void (*hotplug_event)(bool hpd, struct device *dev),
23 struct device *hdmi_dev);
24bool mtk_cec_hpd_high(struct device *dev);
25
26#endif /* _MTK_CEC_H */
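A hedged sketch of the intended consumer of these two entry points; the handler and device names are hypothetical, the real user being the HDMI driver added later in this series:

    /* Hypothetical HDMI-side hotplug callback. */
    static void my_hdmi_hpd_event(bool hpd, struct device *dev)
    {
            /* typically: update the connector status and kick
             * drm_helper_hpd_irq_event() from here */
    }

    /* At bind time, register for hotplug notifications ... */
    mtk_cec_set_hpd_event(cec_dev, my_hdmi_hpd_event, hdmi_dev);

    /* ... and seed the initial connector state with a direct poll. */
    connected = mtk_cec_hpd_high(cec_dev);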
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index d05ca7901315..0186e500d2a5 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -432,11 +432,6 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
432 unsigned long pll_rate; 432 unsigned long pll_rate;
433 unsigned int factor; 433 unsigned int factor;
434 434
435 if (!dpi) {
436 dev_err(dpi->dev, "invalid argument\n");
437 return -EINVAL;
438 }
439
440 pix_rate = 1000UL * mode->clock; 435 pix_rate = 1000UL * mode->clock;
441 if (mode->clock <= 74000) 436 if (mode->clock <= 74000)
442 factor = 8 * 3; 437 factor = 8 * 3;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index b1223d54d0ab..eebb7d881c2b 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -91,7 +91,7 @@ static int mtk_atomic_commit(struct drm_device *drm,
91 mutex_lock(&private->commit.lock); 91 mutex_lock(&private->commit.lock);
92 flush_work(&private->commit.work); 92 flush_work(&private->commit.work);
93 93
94 drm_atomic_helper_swap_state(drm, state); 94 drm_atomic_helper_swap_state(state, true);
95 95
96 if (async) 96 if (async)
97 mtk_atomic_schedule(private, state); 97 mtk_atomic_schedule(private, state);
@@ -243,7 +243,7 @@ static struct drm_driver mtk_drm_driver = {
243 .enable_vblank = mtk_drm_crtc_enable_vblank, 243 .enable_vblank = mtk_drm_crtc_enable_vblank,
244 .disable_vblank = mtk_drm_crtc_disable_vblank, 244 .disable_vblank = mtk_drm_crtc_disable_vblank,
245 245
246 .gem_free_object = mtk_drm_gem_free_object, 246 .gem_free_object_unlocked = mtk_drm_gem_free_object,
247 .gem_vm_ops = &drm_gem_cma_vm_ops, 247 .gem_vm_ops = &drm_gem_cma_vm_ops,
248 .dumb_create = mtk_drm_gem_dumb_create, 248 .dumb_create = mtk_drm_gem_dumb_create,
249 .dumb_map_offset = mtk_drm_gem_dumb_map_offset, 249 .dumb_map_offset = mtk_drm_gem_dumb_map_offset,
@@ -280,8 +280,6 @@ static int mtk_drm_bind(struct device *dev)
280 if (!drm) 280 if (!drm)
281 return -ENOMEM; 281 return -ENOMEM;
282 282
283 drm_dev_set_unique(drm, dev_name(dev));
284
285 drm->dev_private = private; 283 drm->dev_private = private;
286 private->drm = drm; 284 private->drm = drm;
287 285
@@ -293,14 +291,8 @@ static int mtk_drm_bind(struct device *dev)
293 if (ret < 0) 291 if (ret < 0)
294 goto err_deinit; 292 goto err_deinit;
295 293
296 ret = drm_connector_register_all(drm);
297 if (ret < 0)
298 goto err_unregister;
299
300 return 0; 294 return 0;
301 295
302err_unregister:
303 drm_dev_unregister(drm);
304err_deinit: 296err_deinit:
305 mtk_drm_kms_deinit(drm); 297 mtk_drm_kms_deinit(drm);
306err_free: 298err_free:
@@ -455,7 +447,6 @@ static int mtk_drm_remove(struct platform_device *pdev)
455 struct drm_device *drm = private->drm; 447 struct drm_device *drm = private->drm;
456 int i; 448 int i;
457 449
458 drm_connector_unregister_all(drm);
459 drm_dev_unregister(drm); 450 drm_dev_unregister(drm);
460 mtk_drm_kms_deinit(drm); 451 mtk_drm_kms_deinit(drm);
461 drm_dev_unref(drm); 452 drm_dev_unref(drm);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index 51bc8988fc26..3995765a90dc 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -170,6 +170,7 @@ static int mtk_plane_atomic_check(struct drm_plane *plane,
170 170
171 return drm_plane_helper_check_update(plane, state->crtc, fb, 171 return drm_plane_helper_check_update(plane, state->crtc, fb,
172 &src, &dest, &clip, 172 &src, &dest, &clip,
173 state->rotation,
173 DRM_PLANE_HELPER_NO_SCALING, 174 DRM_PLANE_HELPER_NO_SCALING,
174 DRM_PLANE_HELPER_NO_SCALING, 175 DRM_PLANE_HELPER_NO_SCALING,
175 true, true, &visible); 176 true, true, &visible);
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 2d808e59fefd..28b2044ed9f2 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -575,14 +575,6 @@ static int mtk_dsi_connector_get_modes(struct drm_connector *connector)
575 return drm_panel_get_modes(dsi->panel); 575 return drm_panel_get_modes(dsi->panel);
576} 576}
577 577
578static struct drm_encoder *mtk_dsi_connector_best_encoder(
579 struct drm_connector *connector)
580{
581 struct mtk_dsi *dsi = connector_to_dsi(connector);
582
583 return &dsi->encoder;
584}
585
586static const struct drm_encoder_helper_funcs mtk_dsi_encoder_helper_funcs = { 578static const struct drm_encoder_helper_funcs mtk_dsi_encoder_helper_funcs = {
587 .mode_fixup = mtk_dsi_encoder_mode_fixup, 579 .mode_fixup = mtk_dsi_encoder_mode_fixup,
588 .mode_set = mtk_dsi_encoder_mode_set, 580 .mode_set = mtk_dsi_encoder_mode_set,
@@ -603,7 +595,6 @@ static const struct drm_connector_funcs mtk_dsi_connector_funcs = {
603static const struct drm_connector_helper_funcs 595static const struct drm_connector_helper_funcs
604 mtk_dsi_connector_helper_funcs = { 596 mtk_dsi_connector_helper_funcs = {
605 .get_modes = mtk_dsi_connector_get_modes, 597 .get_modes = mtk_dsi_connector_get_modes,
606 .best_encoder = mtk_dsi_connector_best_encoder,
607}; 598};
608 599
609static int mtk_drm_attach_bridge(struct drm_bridge *bridge, 600static int mtk_drm_attach_bridge(struct drm_bridge *bridge,
@@ -695,10 +686,8 @@ static void mtk_dsi_destroy_conn_enc(struct mtk_dsi *dsi)
695{ 686{
696 drm_encoder_cleanup(&dsi->encoder); 687 drm_encoder_cleanup(&dsi->encoder);
697 /* Skip connector cleanup if creation was delegated to the bridge */ 688 /* Skip connector cleanup if creation was delegated to the bridge */
698 if (dsi->conn.dev) { 689 if (dsi->conn.dev)
699 drm_connector_unregister(&dsi->conn);
700 drm_connector_cleanup(&dsi->conn); 690 drm_connector_cleanup(&dsi->conn);
701 }
702} 691}
703 692
704static void mtk_dsi_ddp_start(struct mtk_ddp_comp *comp) 693static void mtk_dsi_ddp_start(struct mtk_ddp_comp *comp)
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
new file mode 100644
index 000000000000..334562d06731
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -0,0 +1,1828 @@
1/*
2 * Copyright (c) 2014 MediaTek Inc.
3 * Author: Jie Qiu <jie.qiu@mediatek.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14#include <drm/drmP.h>
15#include <drm/drm_atomic_helper.h>
16#include <drm/drm_crtc.h>
17#include <drm/drm_crtc_helper.h>
18#include <drm/drm_edid.h>
19#include <linux/arm-smccc.h>
20#include <linux/clk.h>
21#include <linux/delay.h>
22#include <linux/hdmi.h>
23#include <linux/i2c.h>
24#include <linux/io.h>
25#include <linux/kernel.h>
26#include <linux/mfd/syscon.h>
27#include <linux/of_platform.h>
28#include <linux/of.h>
29#include <linux/of_gpio.h>
30#include <linux/of_graph.h>
31#include <linux/phy/phy.h>
32#include <linux/platform_device.h>
33#include <linux/regmap.h>
34#include <sound/hdmi-codec.h>
35#include "mtk_cec.h"
36#include "mtk_hdmi.h"
37#include "mtk_hdmi_regs.h"
38
39#define NCTS_BYTES 7
40
41enum mtk_hdmi_clk_id {
42 MTK_HDMI_CLK_HDMI_PIXEL,
43 MTK_HDMI_CLK_HDMI_PLL,
44 MTK_HDMI_CLK_AUD_BCLK,
45 MTK_HDMI_CLK_AUD_SPDIF,
46 MTK_HDMI_CLK_COUNT
47};
48
49enum hdmi_aud_input_type {
50 HDMI_AUD_INPUT_I2S = 0,
51 HDMI_AUD_INPUT_SPDIF,
52};
53
54enum hdmi_aud_i2s_fmt {
55 HDMI_I2S_MODE_RJT_24BIT = 0,
56 HDMI_I2S_MODE_RJT_16BIT,
57 HDMI_I2S_MODE_LJT_24BIT,
58 HDMI_I2S_MODE_LJT_16BIT,
59 HDMI_I2S_MODE_I2S_24BIT,
60 HDMI_I2S_MODE_I2S_16BIT
61};
62
63enum hdmi_aud_mclk {
64 HDMI_AUD_MCLK_128FS,
65 HDMI_AUD_MCLK_192FS,
66 HDMI_AUD_MCLK_256FS,
67 HDMI_AUD_MCLK_384FS,
68 HDMI_AUD_MCLK_512FS,
69 HDMI_AUD_MCLK_768FS,
70 HDMI_AUD_MCLK_1152FS,
71};
72
73enum hdmi_aud_channel_type {
74 HDMI_AUD_CHAN_TYPE_1_0 = 0,
75 HDMI_AUD_CHAN_TYPE_1_1,
76 HDMI_AUD_CHAN_TYPE_2_0,
77 HDMI_AUD_CHAN_TYPE_2_1,
78 HDMI_AUD_CHAN_TYPE_3_0,
79 HDMI_AUD_CHAN_TYPE_3_1,
80 HDMI_AUD_CHAN_TYPE_4_0,
81 HDMI_AUD_CHAN_TYPE_4_1,
82 HDMI_AUD_CHAN_TYPE_5_0,
83 HDMI_AUD_CHAN_TYPE_5_1,
84 HDMI_AUD_CHAN_TYPE_6_0,
85 HDMI_AUD_CHAN_TYPE_6_1,
86 HDMI_AUD_CHAN_TYPE_7_0,
87 HDMI_AUD_CHAN_TYPE_7_1,
88 HDMI_AUD_CHAN_TYPE_3_0_LRS,
89 HDMI_AUD_CHAN_TYPE_3_1_LRS,
90 HDMI_AUD_CHAN_TYPE_4_0_CLRS,
91 HDMI_AUD_CHAN_TYPE_4_1_CLRS,
92 HDMI_AUD_CHAN_TYPE_6_1_CS,
93 HDMI_AUD_CHAN_TYPE_6_1_CH,
94 HDMI_AUD_CHAN_TYPE_6_1_OH,
95 HDMI_AUD_CHAN_TYPE_6_1_CHR,
96 HDMI_AUD_CHAN_TYPE_7_1_LH_RH,
97 HDMI_AUD_CHAN_TYPE_7_1_LSR_RSR,
98 HDMI_AUD_CHAN_TYPE_7_1_LC_RC,
99 HDMI_AUD_CHAN_TYPE_7_1_LW_RW,
100 HDMI_AUD_CHAN_TYPE_7_1_LSD_RSD,
101 HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS,
102 HDMI_AUD_CHAN_TYPE_7_1_LHS_RHS,
103 HDMI_AUD_CHAN_TYPE_7_1_CS_CH,
104 HDMI_AUD_CHAN_TYPE_7_1_CS_OH,
105 HDMI_AUD_CHAN_TYPE_7_1_CS_CHR,
106 HDMI_AUD_CHAN_TYPE_7_1_CH_OH,
107 HDMI_AUD_CHAN_TYPE_7_1_CH_CHR,
108 HDMI_AUD_CHAN_TYPE_7_1_OH_CHR,
109 HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS_LSR_RSR,
110 HDMI_AUD_CHAN_TYPE_6_0_CS,
111 HDMI_AUD_CHAN_TYPE_6_0_CH,
112 HDMI_AUD_CHAN_TYPE_6_0_OH,
113 HDMI_AUD_CHAN_TYPE_6_0_CHR,
114 HDMI_AUD_CHAN_TYPE_7_0_LH_RH,
115 HDMI_AUD_CHAN_TYPE_7_0_LSR_RSR,
116 HDMI_AUD_CHAN_TYPE_7_0_LC_RC,
117 HDMI_AUD_CHAN_TYPE_7_0_LW_RW,
118 HDMI_AUD_CHAN_TYPE_7_0_LSD_RSD,
119 HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS,
120 HDMI_AUD_CHAN_TYPE_7_0_LHS_RHS,
121 HDMI_AUD_CHAN_TYPE_7_0_CS_CH,
122 HDMI_AUD_CHAN_TYPE_7_0_CS_OH,
123 HDMI_AUD_CHAN_TYPE_7_0_CS_CHR,
124 HDMI_AUD_CHAN_TYPE_7_0_CH_OH,
125 HDMI_AUD_CHAN_TYPE_7_0_CH_CHR,
126 HDMI_AUD_CHAN_TYPE_7_0_OH_CHR,
127 HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS_LSR_RSR,
128 HDMI_AUD_CHAN_TYPE_8_0_LH_RH_CS,
129 HDMI_AUD_CHAN_TYPE_UNKNOWN = 0xFF
130};
131
132enum hdmi_aud_channel_swap_type {
133 HDMI_AUD_SWAP_LR,
134 HDMI_AUD_SWAP_LFE_CC,
135 HDMI_AUD_SWAP_LSRS,
136 HDMI_AUD_SWAP_RLS_RRS,
137 HDMI_AUD_SWAP_LR_STATUS,
138};
139
140struct hdmi_audio_param {
141 enum hdmi_audio_coding_type aud_codec;
142 enum hdmi_audio_sample_size aud_sampe_size;
143 enum hdmi_aud_input_type aud_input_type;
144 enum hdmi_aud_i2s_fmt aud_i2s_fmt;
145 enum hdmi_aud_mclk aud_mclk;
146 enum hdmi_aud_channel_type aud_input_chan_type;
147 struct hdmi_codec_params codec_params;
148};
149
150struct mtk_hdmi {
151 struct drm_bridge bridge;
152 struct drm_connector conn;
153 struct device *dev;
154 struct phy *phy;
155 struct device *cec_dev;
156 struct i2c_adapter *ddc_adpt;
157 struct clk *clk[MTK_HDMI_CLK_COUNT];
158 struct drm_display_mode mode;
159 bool dvi_mode;
160 u32 min_clock;
161 u32 max_clock;
162 u32 max_hdisplay;
163 u32 max_vdisplay;
164 u32 ibias;
165 u32 ibias_up;
166 struct regmap *sys_regmap;
167 unsigned int sys_offset;
168 void __iomem *regs;
169 enum hdmi_colorspace csp;
170 struct hdmi_audio_param aud_param;
171 bool audio_enable;
172 bool powered;
173 bool enabled;
174};
175
176static inline struct mtk_hdmi *hdmi_ctx_from_bridge(struct drm_bridge *b)
177{
178 return container_of(b, struct mtk_hdmi, bridge);
179}
180
181static inline struct mtk_hdmi *hdmi_ctx_from_conn(struct drm_connector *c)
182{
183 return container_of(c, struct mtk_hdmi, conn);
184}
185
186static u32 mtk_hdmi_read(struct mtk_hdmi *hdmi, u32 offset)
187{
188 return readl(hdmi->regs + offset);
189}
190
191static void mtk_hdmi_write(struct mtk_hdmi *hdmi, u32 offset, u32 val)
192{
193 writel(val, hdmi->regs + offset);
194}
195
196static void mtk_hdmi_clear_bits(struct mtk_hdmi *hdmi, u32 offset, u32 bits)
197{
198 void __iomem *reg = hdmi->regs + offset;
199 u32 tmp;
200
201 tmp = readl(reg);
202 tmp &= ~bits;
203 writel(tmp, reg);
204}
205
206static void mtk_hdmi_set_bits(struct mtk_hdmi *hdmi, u32 offset, u32 bits)
207{
208 void __iomem *reg = hdmi->regs + offset;
209 u32 tmp;
210
211 tmp = readl(reg);
212 tmp |= bits;
213 writel(tmp, reg);
214}
215
216static void mtk_hdmi_mask(struct mtk_hdmi *hdmi, u32 offset, u32 val, u32 mask)
217{
218 void __iomem *reg = hdmi->regs + offset;
219 u32 tmp;
220
221 tmp = readl(reg);
222 tmp = (tmp & ~mask) | (val & mask);
223 writel(tmp, reg);
224}
225
226static void mtk_hdmi_hw_vid_black(struct mtk_hdmi *hdmi, bool black)
227{
228 mtk_hdmi_mask(hdmi, VIDEO_CFG_4, black ? GEN_RGB : NORMAL_PATH,
229 VIDEO_SOURCE_SEL);
230}
231
232static void mtk_hdmi_hw_make_reg_writable(struct mtk_hdmi *hdmi, bool enable)
233{
234 struct arm_smccc_res res;
235
236 /*
237 * MT8173 HDMI hardware has an output control bit to enable/disable HDMI
238 * output. This bit can only be controlled in ARM supervisor mode.
239 * The ARM trusted firmware provides an API for the HDMI driver to set
240 * this control bit to enable HDMI output in supervisor mode.
241 */
242 arm_smccc_smc(MTK_SIP_SET_AUTHORIZED_SECURE_REG, 0x14000904, 0x80000000,
243 0, 0, 0, 0, 0, &res);
244
245 regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20,
246 HDMI_PCLK_FREE_RUN, enable ? HDMI_PCLK_FREE_RUN : 0);
247 regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C,
248 HDMI_ON | ANLG_ON, enable ? (HDMI_ON | ANLG_ON) : 0);
249}
250
251static void mtk_hdmi_hw_1p4_version_enable(struct mtk_hdmi *hdmi, bool enable)
252{
253 regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20,
254 HDMI2P0_EN, enable ? 0 : HDMI2P0_EN);
255}
256
257static void mtk_hdmi_hw_aud_mute(struct mtk_hdmi *hdmi)
258{
259 mtk_hdmi_set_bits(hdmi, GRL_AUDIO_CFG, AUDIO_ZERO);
260}
261
262static void mtk_hdmi_hw_aud_unmute(struct mtk_hdmi *hdmi)
263{
264 mtk_hdmi_clear_bits(hdmi, GRL_AUDIO_CFG, AUDIO_ZERO);
265}
266
267static void mtk_hdmi_hw_reset(struct mtk_hdmi *hdmi)
268{
269 regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C,
270 HDMI_RST, HDMI_RST);
271 regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C,
272 HDMI_RST, 0);
273 mtk_hdmi_clear_bits(hdmi, GRL_CFG3, CFG3_CONTROL_PACKET_DELAY);
274 regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C,
275 ANLG_ON, ANLG_ON);
276}
277
278static void mtk_hdmi_hw_enable_notice(struct mtk_hdmi *hdmi, bool enable_notice)
279{
280 mtk_hdmi_mask(hdmi, GRL_CFG2, enable_notice ? CFG2_NOTICE_EN : 0,
281 CFG2_NOTICE_EN);
282}
283
284static void mtk_hdmi_hw_write_int_mask(struct mtk_hdmi *hdmi, u32 int_mask)
285{
286 mtk_hdmi_write(hdmi, GRL_INT_MASK, int_mask);
287}
288
289static void mtk_hdmi_hw_enable_dvi_mode(struct mtk_hdmi *hdmi, bool enable)
290{
291 mtk_hdmi_mask(hdmi, GRL_CFG1, enable ? CFG1_DVI : 0, CFG1_DVI);
292}
293
294static void mtk_hdmi_hw_send_info_frame(struct mtk_hdmi *hdmi, u8 *buffer,
295 u8 len)
296{
297 u32 ctrl_reg = GRL_CTRL;
298 int i;
299 u8 *frame_data;
300 enum hdmi_infoframe_type frame_type;
301 u8 frame_ver;
302 u8 frame_len;
303 u8 checksum;
304 int ctrl_frame_en = 0;
305
306 frame_type = *buffer;
307 buffer += 1;
308 frame_ver = *buffer;
309 buffer += 1;
310 frame_len = *buffer;
311 buffer += 1;
312 checksum = *buffer;
313 buffer += 1;
314 frame_data = buffer;
315
316 dev_dbg(hdmi->dev,
317 "frame_type:0x%x,frame_ver:0x%x,frame_len:0x%x,checksum:0x%x\n",
318 frame_type, frame_ver, frame_len, checksum);
319
320 switch (frame_type) {
321 case HDMI_INFOFRAME_TYPE_AVI:
322 ctrl_frame_en = CTRL_AVI_EN;
323 ctrl_reg = GRL_CTRL;
324 break;
325 case HDMI_INFOFRAME_TYPE_SPD:
326 ctrl_frame_en = CTRL_SPD_EN;
327 ctrl_reg = GRL_CTRL;
328 break;
329 case HDMI_INFOFRAME_TYPE_AUDIO:
330 ctrl_frame_en = CTRL_AUDIO_EN;
331 ctrl_reg = GRL_CTRL;
332 break;
333 case HDMI_INFOFRAME_TYPE_VENDOR:
334 ctrl_frame_en = VS_EN;
335 ctrl_reg = GRL_ACP_ISRC_CTRL;
336 break;
337 }
338 mtk_hdmi_clear_bits(hdmi, ctrl_reg, ctrl_frame_en);
339 mtk_hdmi_write(hdmi, GRL_INFOFRM_TYPE, frame_type);
340 mtk_hdmi_write(hdmi, GRL_INFOFRM_VER, frame_ver);
341 mtk_hdmi_write(hdmi, GRL_INFOFRM_LNG, frame_len);
342
343 mtk_hdmi_write(hdmi, GRL_IFM_PORT, checksum);
344 for (i = 0; i < frame_len; i++)
345 mtk_hdmi_write(hdmi, GRL_IFM_PORT, frame_data[i]);
346
347 mtk_hdmi_set_bits(hdmi, ctrl_reg, ctrl_frame_en);
348}
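mtk_hdmi_hw_send_info_frame() expects a fully packed infoframe, that is, the three header bytes and the checksum in front of the payload, which is what the linux/hdmi.h packing helpers produce. A hedged caller sketch; the AVI-specific fill-in is elided:

    struct hdmi_avi_infoframe frame;
    u8 buffer[17];          /* header + checksum + AVI payload */
    ssize_t len;

    hdmi_avi_infoframe_init(&frame);
    /* ... set colorimetry, VIC, etc. for the current mode ... */
    len = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
    if (len >= 0)
            mtk_hdmi_hw_send_info_frame(hdmi, buffer, len);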
349
350static void mtk_hdmi_hw_send_aud_packet(struct mtk_hdmi *hdmi, bool enable)
351{
352 mtk_hdmi_mask(hdmi, GRL_SHIFT_R2, enable ? 0 : AUDIO_PACKET_OFF,
353 AUDIO_PACKET_OFF);
354}
355
356static void mtk_hdmi_hw_config_sys(struct mtk_hdmi *hdmi)
357{
358 regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20,
359 HDMI_OUT_FIFO_EN | MHL_MODE_ON, 0);
360 usleep_range(2000, 4000);
361 regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20,
362 HDMI_OUT_FIFO_EN | MHL_MODE_ON, HDMI_OUT_FIFO_EN);
363}
364
365static void mtk_hdmi_hw_set_deep_color_mode(struct mtk_hdmi *hdmi)
366{
367 regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20,
368 DEEP_COLOR_MODE_MASK | DEEP_COLOR_EN,
369 COLOR_8BIT_MODE);
370}
371
372static void mtk_hdmi_hw_send_av_mute(struct mtk_hdmi *hdmi)
373{
374 mtk_hdmi_clear_bits(hdmi, GRL_CFG4, CTRL_AVMUTE);
375 usleep_range(2000, 4000);
376 mtk_hdmi_set_bits(hdmi, GRL_CFG4, CTRL_AVMUTE);
377}
378
379static void mtk_hdmi_hw_send_av_unmute(struct mtk_hdmi *hdmi)
380{
381 mtk_hdmi_mask(hdmi, GRL_CFG4, CFG4_AV_UNMUTE_EN,
382 CFG4_AV_UNMUTE_EN | CFG4_AV_UNMUTE_SET);
383 usleep_range(2000, 4000);
384 mtk_hdmi_mask(hdmi, GRL_CFG4, CFG4_AV_UNMUTE_SET,
385 CFG4_AV_UNMUTE_EN | CFG4_AV_UNMUTE_SET);
386}
387
388static void mtk_hdmi_hw_ncts_enable(struct mtk_hdmi *hdmi, bool on)
389{
390 mtk_hdmi_mask(hdmi, GRL_CTS_CTRL, on ? 0 : CTS_CTRL_SOFT,
391 CTS_CTRL_SOFT);
392}
393
394static void mtk_hdmi_hw_ncts_auto_write_enable(struct mtk_hdmi *hdmi,
395 bool enable)
396{
397 mtk_hdmi_mask(hdmi, GRL_CTS_CTRL, enable ? NCTS_WRI_ANYTIME : 0,
398 NCTS_WRI_ANYTIME);
399}
400
401static void mtk_hdmi_hw_msic_setting(struct mtk_hdmi *hdmi,
402 struct drm_display_mode *mode)
403{
404 mtk_hdmi_clear_bits(hdmi, GRL_CFG4, CFG4_MHL_MODE);
405
406 if (mode->flags & DRM_MODE_FLAG_INTERLACE &&
407 mode->clock == 74250 &&
408 mode->vdisplay == 1080)
409 mtk_hdmi_clear_bits(hdmi, GRL_CFG2, CFG2_MHL_DE_SEL);
410 else
411 mtk_hdmi_set_bits(hdmi, GRL_CFG2, CFG2_MHL_DE_SEL);
412}
413
414static void mtk_hdmi_hw_aud_set_channel_swap(struct mtk_hdmi *hdmi,
415 enum hdmi_aud_channel_swap_type swap)
416{
417 u8 swap_bit;
418
419 switch (swap) {
420 case HDMI_AUD_SWAP_LR:
421 swap_bit = LR_SWAP;
422 break;
423 case HDMI_AUD_SWAP_LFE_CC:
424 swap_bit = LFE_CC_SWAP;
425 break;
426 case HDMI_AUD_SWAP_LSRS:
427 swap_bit = LSRS_SWAP;
428 break;
429 case HDMI_AUD_SWAP_RLS_RRS:
430 swap_bit = RLS_RRS_SWAP;
431 break;
432 case HDMI_AUD_SWAP_LR_STATUS:
433 swap_bit = LR_STATUS_SWAP;
434 break;
435 default:
436 swap_bit = LFE_CC_SWAP;
437 break;
438 }
439 mtk_hdmi_mask(hdmi, GRL_CH_SWAP, swap_bit, 0xff);
440}
441
442static void mtk_hdmi_hw_aud_set_bit_num(struct mtk_hdmi *hdmi,
443 enum hdmi_audio_sample_size bit_num)
444{
445 u32 val;
446
447 switch (bit_num) {
448 case HDMI_AUDIO_SAMPLE_SIZE_16:
449 val = AOUT_16BIT;
450 break;
451 case HDMI_AUDIO_SAMPLE_SIZE_20:
452 val = AOUT_20BIT;
453 break;
454 case HDMI_AUDIO_SAMPLE_SIZE_24:
455 case HDMI_AUDIO_SAMPLE_SIZE_STREAM:
456 val = AOUT_24BIT;
457 break;
458 }
459
460 mtk_hdmi_mask(hdmi, GRL_AOUT_CFG, val, AOUT_BNUM_SEL_MASK);
461}
462
463static void mtk_hdmi_hw_aud_set_i2s_fmt(struct mtk_hdmi *hdmi,
464 enum hdmi_aud_i2s_fmt i2s_fmt)
465{
466 u32 val;
467
468 val = mtk_hdmi_read(hdmi, GRL_CFG0);
469 val &= ~(CFG0_W_LENGTH_MASK | CFG0_I2S_MODE_MASK);
470
471 switch (i2s_fmt) {
472 case HDMI_I2S_MODE_RJT_24BIT:
473 val |= CFG0_I2S_MODE_RTJ | CFG0_W_LENGTH_24BIT;
474 break;
475 case HDMI_I2S_MODE_RJT_16BIT:
476 val |= CFG0_I2S_MODE_RTJ | CFG0_W_LENGTH_16BIT;
477 break;
478 case HDMI_I2S_MODE_LJT_24BIT:
479 default:
480 val |= CFG0_I2S_MODE_LTJ | CFG0_W_LENGTH_24BIT;
481 break;
482 case HDMI_I2S_MODE_LJT_16BIT:
483 val |= CFG0_I2S_MODE_LTJ | CFG0_W_LENGTH_16BIT;
484 break;
485 case HDMI_I2S_MODE_I2S_24BIT:
486 val |= CFG0_I2S_MODE_I2S | CFG0_W_LENGTH_24BIT;
487 break;
488 case HDMI_I2S_MODE_I2S_16BIT:
489 val |= CFG0_I2S_MODE_I2S | CFG0_W_LENGTH_16BIT;
490 break;
491 }
492 mtk_hdmi_write(hdmi, GRL_CFG0, val);
493}
494
495static void mtk_hdmi_hw_audio_config(struct mtk_hdmi *hdmi, bool dst)
496{
497 const u8 mask = HIGH_BIT_RATE | DST_NORMAL_DOUBLE | SACD_DST | DSD_SEL;
498 u8 val;
499
500 /* Disable high bitrate, set DST packet normal/double */
501 mtk_hdmi_clear_bits(hdmi, GRL_AOUT_CFG, HIGH_BIT_RATE_PACKET_ALIGN);
502
503 if (dst)
504 val = DST_NORMAL_DOUBLE | SACD_DST;
505 else
506 val = 0;
507
508 mtk_hdmi_mask(hdmi, GRL_AUDIO_CFG, val, mask);
509}
510
511static void mtk_hdmi_hw_aud_set_i2s_chan_num(struct mtk_hdmi *hdmi,
512 enum hdmi_aud_channel_type channel_type,
513 u8 channel_count)
514{
515 unsigned int ch_switch;
516 u8 i2s_uv;
517
518 ch_switch = CH_SWITCH(7, 7) | CH_SWITCH(6, 6) |
519 CH_SWITCH(5, 5) | CH_SWITCH(4, 4) |
520 CH_SWITCH(3, 3) | CH_SWITCH(1, 2) |
521 CH_SWITCH(2, 1) | CH_SWITCH(0, 0);
522
523 if (channel_count == 2) {
524 i2s_uv = I2S_UV_CH_EN(0);
525 } else if (channel_count == 3 || channel_count == 4) {
526 if (channel_count == 4 &&
527 (channel_type == HDMI_AUD_CHAN_TYPE_3_0_LRS ||
528 channel_type == HDMI_AUD_CHAN_TYPE_4_0))
529 i2s_uv = I2S_UV_CH_EN(2) | I2S_UV_CH_EN(0);
530 else
531 i2s_uv = I2S_UV_CH_EN(3) | I2S_UV_CH_EN(2);
532 } else if (channel_count == 6 || channel_count == 5) {
533 if (channel_count == 6 &&
534 channel_type != HDMI_AUD_CHAN_TYPE_5_1 &&
535 channel_type != HDMI_AUD_CHAN_TYPE_4_1_CLRS) {
536 i2s_uv = I2S_UV_CH_EN(3) | I2S_UV_CH_EN(2) |
537 I2S_UV_CH_EN(1) | I2S_UV_CH_EN(0);
538 } else {
539 i2s_uv = I2S_UV_CH_EN(2) | I2S_UV_CH_EN(1) |
540 I2S_UV_CH_EN(0);
541 }
542 } else if (channel_count == 8 || channel_count == 7) {
543 i2s_uv = I2S_UV_CH_EN(3) | I2S_UV_CH_EN(2) |
544 I2S_UV_CH_EN(1) | I2S_UV_CH_EN(0);
545 } else {
546 i2s_uv = I2S_UV_CH_EN(0);
547 }
548
549 mtk_hdmi_write(hdmi, GRL_CH_SW0, ch_switch & 0xff);
550 mtk_hdmi_write(hdmi, GRL_CH_SW1, (ch_switch >> 8) & 0xff);
551 mtk_hdmi_write(hdmi, GRL_CH_SW2, (ch_switch >> 16) & 0xff);
552 mtk_hdmi_write(hdmi, GRL_I2S_UV, i2s_uv);
553}
554
555static void mtk_hdmi_hw_aud_set_input_type(struct mtk_hdmi *hdmi,
556 enum hdmi_aud_input_type input_type)
557{
558 u32 val;
559
560 val = mtk_hdmi_read(hdmi, GRL_CFG1);
561 if (input_type == HDMI_AUD_INPUT_I2S &&
562 (val & CFG1_SPDIF) == CFG1_SPDIF) {
563 val &= ~CFG1_SPDIF;
564 } else if (input_type == HDMI_AUD_INPUT_SPDIF &&
565 (val & CFG1_SPDIF) == 0) {
566 val |= CFG1_SPDIF;
567 }
568 mtk_hdmi_write(hdmi, GRL_CFG1, val);
569}
570
571static void mtk_hdmi_hw_aud_set_channel_status(struct mtk_hdmi *hdmi,
572 u8 *channel_status)
573{
574 int i;
575
576 for (i = 0; i < 5; i++) {
577 mtk_hdmi_write(hdmi, GRL_I2S_C_STA0 + i * 4, channel_status[i]);
578 mtk_hdmi_write(hdmi, GRL_L_STATUS_0 + i * 4, channel_status[i]);
579 mtk_hdmi_write(hdmi, GRL_R_STATUS_0 + i * 4, channel_status[i]);
580 }
581 for (; i < 24; i++) {
582 mtk_hdmi_write(hdmi, GRL_L_STATUS_0 + i * 4, 0);
583 mtk_hdmi_write(hdmi, GRL_R_STATUS_0 + i * 4, 0);
584 }
585}
586
587static void mtk_hdmi_hw_aud_src_reenable(struct mtk_hdmi *hdmi)
588{
589 u32 val;
590
591 val = mtk_hdmi_read(hdmi, GRL_MIX_CTRL);
592 if (val & MIX_CTRL_SRC_EN) {
593 val &= ~MIX_CTRL_SRC_EN;
594 mtk_hdmi_write(hdmi, GRL_MIX_CTRL, val);
595 usleep_range(255, 512);
596 val |= MIX_CTRL_SRC_EN;
597 mtk_hdmi_write(hdmi, GRL_MIX_CTRL, val);
598 }
599}
600
601static void mtk_hdmi_hw_aud_src_disable(struct mtk_hdmi *hdmi)
602{
603 u32 val;
604
605 val = mtk_hdmi_read(hdmi, GRL_MIX_CTRL);
606 val &= ~MIX_CTRL_SRC_EN;
607 mtk_hdmi_write(hdmi, GRL_MIX_CTRL, val);
608 mtk_hdmi_write(hdmi, GRL_SHIFT_L1, 0x00);
609}
610
611static void mtk_hdmi_hw_aud_set_mclk(struct mtk_hdmi *hdmi,
612 enum hdmi_aud_mclk mclk)
613{
614 u32 val;
615
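 /* Keep everything except the FS (clock-divider ratio) field in bits 6:4. */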
616 val = mtk_hdmi_read(hdmi, GRL_CFG5);
617 val &= CFG5_CD_RATIO_MASK;
618
619 switch (mclk) {
620 case HDMI_AUD_MCLK_128FS:
621 val |= CFG5_FS128;
622 break;
623 case HDMI_AUD_MCLK_256FS:
624 val |= CFG5_FS256;
625 break;
626 case HDMI_AUD_MCLK_384FS:
627 val |= CFG5_FS384;
628 break;
629 case HDMI_AUD_MCLK_512FS:
630 val |= CFG5_FS512;
631 break;
632 case HDMI_AUD_MCLK_768FS:
633 val |= CFG5_FS768;
634 break;
635 default:
636 val |= CFG5_FS256;
637 break;
638 }
639 mtk_hdmi_write(hdmi, GRL_CFG5, val);
640}
641
642struct hdmi_acr_n {
643 unsigned int clock;
644 unsigned int n[3];
645};
646
647/* Recommended N values from HDMI specification, tables 7-1 to 7-3 */
648static const struct hdmi_acr_n hdmi_rec_n_table[] = {
649 /* Clock, N: 32kHz 44.1kHz 48kHz */
650 { 25175, { 4576, 7007, 6864 } },
651 { 74176, { 11648, 17836, 11648 } },
652 { 148352, { 11648, 8918, 5824 } },
653 { 296703, { 5824, 4459, 5824 } },
654 { 297000, { 3072, 4704, 5120 } },
655 { 0, { 4096, 6272, 6144 } }, /* all other TMDS clocks */
656};
657
658/**
659 * hdmi_recommended_n() - Return N value recommended by HDMI specification
660 * @freq: audio sample rate in Hz
661 * @clock: rounded TMDS clock in kHz
662 */
663static unsigned int hdmi_recommended_n(unsigned int freq, unsigned int clock)
664{
665 const struct hdmi_acr_n *recommended;
666 unsigned int i;
667
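 /*
  * If no exact TMDS clock match is found, the loop falls through to the
  * final { 0, ... } entry, which holds the defaults for all other clocks.
  */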
668 for (i = 0; i < ARRAY_SIZE(hdmi_rec_n_table) - 1; i++) {
669 if (clock == hdmi_rec_n_table[i].clock)
670 break;
671 }
672 recommended = hdmi_rec_n_table + i;
673
674 switch (freq) {
675 case 32000:
676 return recommended->n[0];
677 case 44100:
678 return recommended->n[1];
679 case 48000:
680 return recommended->n[2];
681 case 88200:
682 return recommended->n[1] * 2;
683 case 96000:
684 return recommended->n[2] * 2;
685 case 176400:
686 return recommended->n[1] * 4;
687 case 192000:
688 return recommended->n[2] * 4;
689 default:
690 return (128 * freq) / 1000;
691 }
692}
693
694static unsigned int hdmi_mode_clock_to_hz(unsigned int clock)
695{
696 switch (clock) {
697 case 25175:
698 return 25174825; /* 25.2/1.001 MHz */
699 case 74176:
700 return 74175824; /* 74.25/1.001 MHz */
701 case 148352:
702 return 148351648; /* 148.5/1.001 MHz */
703 case 296703:
704 return 296703297; /* 297/1.001 MHz */
705 default:
706 return clock * 1000;
707 }
708}
709
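 /*
  * Per the HDMI specification, CTS is chosen so that
  * f_TMDS * N = 128 * fs * CTS. For example, at a 148.5 MHz TMDS clock
  * with fs = 48 kHz and N = 6144, this yields CTS = 148500.
  */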
710static unsigned int hdmi_expected_cts(unsigned int audio_sample_rate,
711 unsigned int tmds_clock, unsigned int n)
712{
713 return DIV_ROUND_CLOSEST_ULL((u64)hdmi_mode_clock_to_hz(tmds_clock) * n,
714 128 * audio_sample_rate);
715}
716
717static void do_hdmi_hw_aud_set_ncts(struct mtk_hdmi *hdmi, unsigned int n,
718 unsigned int cts)
719{
720 unsigned char val[NCTS_BYTES];
721 int i;
722
723 mtk_hdmi_write(hdmi, GRL_NCTS, 0);
724 mtk_hdmi_write(hdmi, GRL_NCTS, 0);
725 mtk_hdmi_write(hdmi, GRL_NCTS, 0);
726 memset(val, 0, sizeof(val));
727
728 val[0] = (cts >> 24) & 0xff;
729 val[1] = (cts >> 16) & 0xff;
730 val[2] = (cts >> 8) & 0xff;
731 val[3] = cts & 0xff;
732
733 val[4] = (n >> 16) & 0xff;
734 val[5] = (n >> 8) & 0xff;
735 val[6] = n & 0xff;
736
737 for (i = 0; i < NCTS_BYTES; i++)
738 mtk_hdmi_write(hdmi, GRL_NCTS, val[i]);
739}
740
741static void mtk_hdmi_hw_aud_set_ncts(struct mtk_hdmi *hdmi,
742 unsigned int sample_rate,
743 unsigned int clock)
744{
745 unsigned int n, cts;
746
747 n = hdmi_recommended_n(sample_rate, clock);
748 cts = hdmi_expected_cts(sample_rate, clock, n);
749
750 dev_dbg(hdmi->dev, "%s: sample_rate=%u, clock=%d, cts=%u, n=%u\n",
751 __func__, sample_rate, clock, n, cts);
752
753 mtk_hdmi_mask(hdmi, DUMMY_304, AUDIO_I2S_NCTS_SEL_64,
754 AUDIO_I2S_NCTS_SEL);
755 do_hdmi_hw_aud_set_ncts(hdmi, n, cts);
756}
757
758static u8 mtk_hdmi_aud_get_chnl_count(enum hdmi_aud_channel_type channel_type)
759{
760 switch (channel_type) {
761 case HDMI_AUD_CHAN_TYPE_1_0:
762 case HDMI_AUD_CHAN_TYPE_1_1:
763 case HDMI_AUD_CHAN_TYPE_2_0:
764 return 2;
765 case HDMI_AUD_CHAN_TYPE_2_1:
766 case HDMI_AUD_CHAN_TYPE_3_0:
767 return 3;
768 case HDMI_AUD_CHAN_TYPE_3_1:
769 case HDMI_AUD_CHAN_TYPE_4_0:
770 case HDMI_AUD_CHAN_TYPE_3_0_LRS:
771 return 4;
772 case HDMI_AUD_CHAN_TYPE_4_1:
773 case HDMI_AUD_CHAN_TYPE_5_0:
774 case HDMI_AUD_CHAN_TYPE_3_1_LRS:
775 case HDMI_AUD_CHAN_TYPE_4_0_CLRS:
776 return 5;
777 case HDMI_AUD_CHAN_TYPE_5_1:
778 case HDMI_AUD_CHAN_TYPE_6_0:
779 case HDMI_AUD_CHAN_TYPE_4_1_CLRS:
780 case HDMI_AUD_CHAN_TYPE_6_0_CS:
781 case HDMI_AUD_CHAN_TYPE_6_0_CH:
782 case HDMI_AUD_CHAN_TYPE_6_0_OH:
783 case HDMI_AUD_CHAN_TYPE_6_0_CHR:
784 return 6;
785 case HDMI_AUD_CHAN_TYPE_6_1:
786 case HDMI_AUD_CHAN_TYPE_6_1_CS:
787 case HDMI_AUD_CHAN_TYPE_6_1_CH:
788 case HDMI_AUD_CHAN_TYPE_6_1_OH:
789 case HDMI_AUD_CHAN_TYPE_6_1_CHR:
790 case HDMI_AUD_CHAN_TYPE_7_0:
791 case HDMI_AUD_CHAN_TYPE_7_0_LH_RH:
792 case HDMI_AUD_CHAN_TYPE_7_0_LSR_RSR:
793 case HDMI_AUD_CHAN_TYPE_7_0_LC_RC:
794 case HDMI_AUD_CHAN_TYPE_7_0_LW_RW:
795 case HDMI_AUD_CHAN_TYPE_7_0_LSD_RSD:
796 case HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS:
797 case HDMI_AUD_CHAN_TYPE_7_0_LHS_RHS:
798 case HDMI_AUD_CHAN_TYPE_7_0_CS_CH:
799 case HDMI_AUD_CHAN_TYPE_7_0_CS_OH:
800 case HDMI_AUD_CHAN_TYPE_7_0_CS_CHR:
801 case HDMI_AUD_CHAN_TYPE_7_0_CH_OH:
802 case HDMI_AUD_CHAN_TYPE_7_0_CH_CHR:
803 case HDMI_AUD_CHAN_TYPE_7_0_OH_CHR:
804 case HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS_LSR_RSR:
805 case HDMI_AUD_CHAN_TYPE_8_0_LH_RH_CS:
806 return 7;
807 case HDMI_AUD_CHAN_TYPE_7_1:
808 case HDMI_AUD_CHAN_TYPE_7_1_LH_RH:
809 case HDMI_AUD_CHAN_TYPE_7_1_LSR_RSR:
810 case HDMI_AUD_CHAN_TYPE_7_1_LC_RC:
811 case HDMI_AUD_CHAN_TYPE_7_1_LW_RW:
812 case HDMI_AUD_CHAN_TYPE_7_1_LSD_RSD:
813 case HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS:
814 case HDMI_AUD_CHAN_TYPE_7_1_LHS_RHS:
815 case HDMI_AUD_CHAN_TYPE_7_1_CS_CH:
816 case HDMI_AUD_CHAN_TYPE_7_1_CS_OH:
817 case HDMI_AUD_CHAN_TYPE_7_1_CS_CHR:
818 case HDMI_AUD_CHAN_TYPE_7_1_CH_OH:
819 case HDMI_AUD_CHAN_TYPE_7_1_CH_CHR:
820 case HDMI_AUD_CHAN_TYPE_7_1_OH_CHR:
821 case HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS_LSR_RSR:
822 return 8;
823 default:
824 return 2;
825 }
826}
827
828static int mtk_hdmi_video_change_vpll(struct mtk_hdmi *hdmi, u32 clock)
829{
830 unsigned long rate;
831 int ret;
832
833 /* The DPI driver should already have set TVDPLL to the correct rate */
834 ret = clk_set_rate(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL], clock);
835 if (ret) {
836 dev_err(hdmi->dev, "Failed to set PLL to %u Hz: %d\n", clock,
837 ret);
838 return ret;
839 }
840
841 rate = clk_get_rate(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]);
842
843 if (DIV_ROUND_CLOSEST(rate, 1000) != DIV_ROUND_CLOSEST(clock, 1000))
844 dev_warn(hdmi->dev, "Want PLL %u Hz, got %lu Hz\n", clock,
845 rate);
846 else
847 dev_dbg(hdmi->dev, "Want PLL %u Hz, got %lu Hz\n", clock, rate);
848
849 mtk_hdmi_hw_config_sys(hdmi);
850 mtk_hdmi_hw_set_deep_color_mode(hdmi);
851 return 0;
852}
853
854static void mtk_hdmi_video_set_display_mode(struct mtk_hdmi *hdmi,
855 struct drm_display_mode *mode)
856{
857 mtk_hdmi_hw_reset(hdmi);
858 mtk_hdmi_hw_enable_notice(hdmi, true);
859 mtk_hdmi_hw_write_int_mask(hdmi, 0xff);
860 mtk_hdmi_hw_enable_dvi_mode(hdmi, hdmi->dvi_mode);
861 mtk_hdmi_hw_ncts_auto_write_enable(hdmi, true);
862
863 mtk_hdmi_hw_msic_setting(hdmi, mode);
864}
865
866static int mtk_hdmi_aud_enable_packet(struct mtk_hdmi *hdmi, bool enable)
867{
868 mtk_hdmi_hw_send_aud_packet(hdmi, enable);
869 return 0;
870}
871
872static int mtk_hdmi_aud_on_off_hw_ncts(struct mtk_hdmi *hdmi, bool on)
873{
874 mtk_hdmi_hw_ncts_enable(hdmi, on);
875 return 0;
876}
877
878static int mtk_hdmi_aud_set_input(struct mtk_hdmi *hdmi)
879{
880 enum hdmi_aud_channel_type chan_type;
881 u8 chan_count;
882 bool dst;
883
884 mtk_hdmi_hw_aud_set_channel_swap(hdmi, HDMI_AUD_SWAP_LFE_CC);
885 mtk_hdmi_set_bits(hdmi, GRL_MIX_CTRL, MIX_CTRL_FLAT);
886
887 if (hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_SPDIF &&
888 hdmi->aud_param.aud_codec == HDMI_AUDIO_CODING_TYPE_DST) {
889 mtk_hdmi_hw_aud_set_bit_num(hdmi, HDMI_AUDIO_SAMPLE_SIZE_24);
890 } else if (hdmi->aud_param.aud_i2s_fmt == HDMI_I2S_MODE_LJT_24BIT) {
891 hdmi->aud_param.aud_i2s_fmt = HDMI_I2S_MODE_LJT_16BIT;
892 }
893
894 mtk_hdmi_hw_aud_set_i2s_fmt(hdmi, hdmi->aud_param.aud_i2s_fmt);
895 mtk_hdmi_hw_aud_set_bit_num(hdmi, HDMI_AUDIO_SAMPLE_SIZE_24);
896
897 dst = ((hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_SPDIF) &&
898 (hdmi->aud_param.aud_codec == HDMI_AUDIO_CODING_TYPE_DST));
899 mtk_hdmi_hw_audio_config(hdmi, dst);
900
901 if (hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_SPDIF)
902 chan_type = HDMI_AUD_CHAN_TYPE_2_0;
903 else
904 chan_type = hdmi->aud_param.aud_input_chan_type;
905 chan_count = mtk_hdmi_aud_get_chnl_count(chan_type);
906 mtk_hdmi_hw_aud_set_i2s_chan_num(hdmi, chan_type, chan_count);
907 mtk_hdmi_hw_aud_set_input_type(hdmi, hdmi->aud_param.aud_input_type);
908
909 return 0;
910}
911
912static int mtk_hdmi_aud_set_src(struct mtk_hdmi *hdmi,
913 struct drm_display_mode *display_mode)
914{
915 unsigned int sample_rate = hdmi->aud_param.codec_params.sample_rate;
916
917 mtk_hdmi_aud_on_off_hw_ncts(hdmi, false);
918 mtk_hdmi_hw_aud_src_disable(hdmi);
919 mtk_hdmi_clear_bits(hdmi, GRL_CFG2, CFG2_ACLK_INV);
920
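 /*
  * The I2S input path supports sample rates up to 96 kHz; the S/PDIF
  * path is limited to 48 kHz and always uses a 128*fs master clock.
  */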
921 if (hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_I2S) {
922 switch (sample_rate) {
923 case 32000:
924 case 44100:
925 case 48000:
926 case 88200:
927 case 96000:
928 break;
929 default:
930 return -EINVAL;
931 }
932 mtk_hdmi_hw_aud_set_mclk(hdmi, hdmi->aud_param.aud_mclk);
933 } else {
934 switch (sample_rate) {
935 case 32000:
936 case 44100:
937 case 48000:
938 break;
939 default:
940 return -EINVAL;
941 }
942 mtk_hdmi_hw_aud_set_mclk(hdmi, HDMI_AUD_MCLK_128FS);
943 }
944
945 mtk_hdmi_hw_aud_set_ncts(hdmi, sample_rate, display_mode->clock);
946
947 mtk_hdmi_hw_aud_src_reenable(hdmi);
948 return 0;
949}
950
951static int mtk_hdmi_aud_output_config(struct mtk_hdmi *hdmi,
952 struct drm_display_mode *display_mode)
953{
954 mtk_hdmi_hw_aud_mute(hdmi);
955 mtk_hdmi_aud_enable_packet(hdmi, false);
956
957 mtk_hdmi_aud_set_input(hdmi);
958 mtk_hdmi_aud_set_src(hdmi, display_mode);
959 mtk_hdmi_hw_aud_set_channel_status(hdmi,
960 hdmi->aud_param.codec_params.iec.status);
961
962 usleep_range(50, 100);
963
964 mtk_hdmi_aud_on_off_hw_ncts(hdmi, true);
965 mtk_hdmi_aud_enable_packet(hdmi, true);
966 mtk_hdmi_hw_aud_unmute(hdmi);
967 return 0;
968}
969
970static int mtk_hdmi_setup_avi_infoframe(struct mtk_hdmi *hdmi,
971 struct drm_display_mode *mode)
972{
973 struct hdmi_avi_infoframe frame;
974 u8 buffer[17];
975 ssize_t err;
976
977 err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
978 if (err < 0) {
979 dev_err(hdmi->dev,
980 "Failed to get AVI infoframe from mode: %zd\n", err);
981 return err;
982 }
983
984 err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
985 if (err < 0) {
986 dev_err(hdmi->dev, "Failed to pack AVI infoframe: %zd\n", err);
987 return err;
988 }
989
990 mtk_hdmi_hw_send_info_frame(hdmi, buffer, sizeof(buffer));
991 return 0;
992}
993
994static int mtk_hdmi_setup_spd_infoframe(struct mtk_hdmi *hdmi,
995 const char *vendor,
996 const char *product)
997{
998 struct hdmi_spd_infoframe frame;
999 u8 buffer[29];
1000 ssize_t err;
1001
1002 err = hdmi_spd_infoframe_init(&frame, vendor, product);
1003 if (err < 0) {
1004 dev_err(hdmi->dev, "Failed to initialize SPD infoframe: %zd\n",
1005 err);
1006 return err;
1007 }
1008
1009 err = hdmi_spd_infoframe_pack(&frame, buffer, sizeof(buffer));
1010 if (err < 0) {
1011 dev_err(hdmi->dev, "Failed to pack SPD infoframe: %zd\n", err);
1012 return err;
1013 }
1014
1015 mtk_hdmi_hw_send_info_frame(hdmi, buffer, sizeof(buffer));
1016 return 0;
1017}
1018
1019static int mtk_hdmi_setup_audio_infoframe(struct mtk_hdmi *hdmi)
1020{
1021 struct hdmi_audio_infoframe frame;
1022 u8 buffer[14];
1023 ssize_t err;
1024
1025 err = hdmi_audio_infoframe_init(&frame);
1026 if (err < 0) {
1027 dev_err(hdmi->dev, "Failed to initialize audio infoframe: %zd\n",
1028 err);
1029 return err;
1030 }
1031
1032 frame.coding_type = HDMI_AUDIO_CODING_TYPE_STREAM;
1033 frame.sample_frequency = HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM;
1034 frame.sample_size = HDMI_AUDIO_SAMPLE_SIZE_STREAM;
1035 frame.channels = mtk_hdmi_aud_get_chnl_count(
1036 hdmi->aud_param.aud_input_chan_type);
1037
1038 err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
1039 if (err < 0) {
1040 dev_err(hdmi->dev, "Failed to pack audio infoframe: %zd\n",
1041 err);
1042 return err;
1043 }
1044
1045 mtk_hdmi_hw_send_info_frame(hdmi, buffer, sizeof(buffer));
1046 return 0;
1047}
1048
1049static int mtk_hdmi_setup_vendor_specific_infoframe(struct mtk_hdmi *hdmi,
1050 struct drm_display_mode *mode)
1051{
1052 struct hdmi_vendor_infoframe frame;
1053 u8 buffer[10];
1054 ssize_t err;
1055
1056 err = drm_hdmi_vendor_infoframe_from_display_mode(&frame, mode);
1057 if (err) {
1058 dev_err(hdmi->dev,
1059 "Failed to get vendor infoframe from mode: %zd\n", err);
1060 return err;
1061 }
1062
1063 err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer));
1064 if (err) {
1065 dev_err(hdmi->dev, "Failed to pack vendor infoframe: %zd\n",
1066 err);
1067 return err;
1068 }
1069
1070 mtk_hdmi_hw_send_info_frame(hdmi, buffer, sizeof(buffer));
1071 return 0;
1072}
1073
1074static int mtk_hdmi_output_init(struct mtk_hdmi *hdmi)
1075{
1076 struct hdmi_audio_param *aud_param = &hdmi->aud_param;
1077
1078 hdmi->csp = HDMI_COLORSPACE_RGB;
1079 aud_param->aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
1080 aud_param->aud_sampe_size = HDMI_AUDIO_SAMPLE_SIZE_16;
1081 aud_param->aud_input_type = HDMI_AUD_INPUT_I2S;
1082 aud_param->aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
1083 aud_param->aud_mclk = HDMI_AUD_MCLK_128FS;
1084 aud_param->aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0;
1085
1086 return 0;
1087}
1088
1089void mtk_hdmi_audio_enable(struct mtk_hdmi *hdmi)
1090{
1091 mtk_hdmi_aud_enable_packet(hdmi, true);
1092 hdmi->audio_enable = true;
1093}
1094
1095void mtk_hdmi_audio_disable(struct mtk_hdmi *hdmi)
1096{
1097 mtk_hdmi_aud_enable_packet(hdmi, false);
1098 hdmi->audio_enable = false;
1099}
1100
1101int mtk_hdmi_audio_set_param(struct mtk_hdmi *hdmi,
1102 struct hdmi_audio_param *param)
1103{
1104 if (!hdmi->audio_enable) {
1105 dev_err(hdmi->dev, "HDMI audio is disabled\n");
1106 return -EINVAL;
1107 }
1108 dev_dbg(hdmi->dev, "codec:%d, input:%d, channel:%d, fs:%d\n",
1109 param->aud_codec, param->aud_input_type,
1110 param->aud_input_chan_type, param->codec_params.sample_rate);
1111 memcpy(&hdmi->aud_param, param, sizeof(*param));
1112 return mtk_hdmi_aud_output_config(hdmi, &hdmi->mode);
1113}
1114
1115static int mtk_hdmi_output_set_display_mode(struct mtk_hdmi *hdmi,
1116 struct drm_display_mode *mode)
1117{
1118 int ret;
1119
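 /*
  * Blank the video output, mute audio and power down the PHY while the
  * PLL and display mode are reprogrammed, then bring everything back up
  * and resend the infoframes.
  */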
1120 mtk_hdmi_hw_vid_black(hdmi, true);
1121 mtk_hdmi_hw_aud_mute(hdmi);
1122 mtk_hdmi_hw_send_av_mute(hdmi);
1123 phy_power_off(hdmi->phy);
1124
1125 ret = mtk_hdmi_video_change_vpll(hdmi,
1126 mode->clock * 1000);
1127 if (ret) {
1128 dev_err(hdmi->dev, "Failed to set vpll: %d\n", ret);
1129 return ret;
1130 }
1131 mtk_hdmi_video_set_display_mode(hdmi, mode);
1132
1133 phy_power_on(hdmi->phy);
1134 mtk_hdmi_aud_output_config(hdmi, mode);
1135
1136 mtk_hdmi_setup_audio_infoframe(hdmi);
1137 mtk_hdmi_setup_avi_infoframe(hdmi, mode);
1138 mtk_hdmi_setup_spd_infoframe(hdmi, "mediatek", "On-chip HDMI");
1139 if (mode->flags & DRM_MODE_FLAG_3D_MASK)
1140 mtk_hdmi_setup_vendor_specific_infoframe(hdmi, mode);
1141
1142 mtk_hdmi_hw_vid_black(hdmi, false);
1143 mtk_hdmi_hw_aud_unmute(hdmi);
1144 mtk_hdmi_hw_send_av_unmute(hdmi);
1145
1146 return 0;
1147}
1148
1149static const char * const mtk_hdmi_clk_names[MTK_HDMI_CLK_COUNT] = {
1150 [MTK_HDMI_CLK_HDMI_PIXEL] = "pixel",
1151 [MTK_HDMI_CLK_HDMI_PLL] = "pll",
1152 [MTK_HDMI_CLK_AUD_BCLK] = "bclk",
1153 [MTK_HDMI_CLK_AUD_SPDIF] = "spdif",
1154};
1155
1156static int mtk_hdmi_get_all_clk(struct mtk_hdmi *hdmi,
1157 struct device_node *np)
1158{
1159 int i;
1160
1161 for (i = 0; i < ARRAY_SIZE(mtk_hdmi_clk_names); i++) {
1162 hdmi->clk[i] = of_clk_get_by_name(np,
1163 mtk_hdmi_clk_names[i]);
1164 if (IS_ERR(hdmi->clk[i]))
1165 return PTR_ERR(hdmi->clk[i]);
1166 }
1167 return 0;
1168}
1169
1170static int mtk_hdmi_clk_enable_audio(struct mtk_hdmi *hdmi)
1171{
1172 int ret;
1173
1174 ret = clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_AUD_BCLK]);
1175 if (ret)
1176 return ret;
1177
1178 ret = clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_AUD_SPDIF]);
1179 if (ret)
1180 goto err;
1181
1182 return 0;
1183err:
1184 clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_AUD_BCLK]);
1185 return ret;
1186}
1187
1188static void mtk_hdmi_clk_disable_audio(struct mtk_hdmi *hdmi)
1189{
1190 clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_AUD_BCLK]);
1191 clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_AUD_SPDIF]);
1192}
1193
1194static enum drm_connector_status hdmi_conn_detect(struct drm_connector *conn,
1195 bool force)
1196{
1197 struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
1198
1199 return mtk_cec_hpd_high(hdmi->cec_dev) ?
1200 connector_status_connected : connector_status_disconnected;
1201}
1202
1203static void hdmi_conn_destroy(struct drm_connector *conn)
1204{
1205 struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
1206
1207 mtk_cec_set_hpd_event(hdmi->cec_dev, NULL, NULL);
1208
1209 drm_connector_cleanup(conn);
1210}
1211
1212static int mtk_hdmi_conn_get_modes(struct drm_connector *conn)
1213{
1214 struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
1215 struct edid *edid;
1216 int ret;
1217
1218 if (!hdmi->ddc_adpt)
1219 return -ENODEV;
1220
1221 edid = drm_get_edid(conn, hdmi->ddc_adpt);
1222 if (!edid)
1223 return -ENODEV;
1224
1225 hdmi->dvi_mode = !drm_detect_monitor_audio(edid);
1226
1227 drm_mode_connector_update_edid_property(conn, edid);
1228
1229 ret = drm_add_edid_modes(conn, edid);
1230 drm_edid_to_eld(conn, edid);
1231 kfree(edid);
1232 return ret;
1233}
1234
1235static int mtk_hdmi_conn_mode_valid(struct drm_connector *conn,
1236 struct drm_display_mode *mode)
1237{
1238 struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
1239
1240 dev_dbg(hdmi->dev, "xres=%d, yres=%d, refresh=%d, intl=%d clock=%d\n",
1241 mode->hdisplay, mode->vdisplay, mode->vrefresh,
1242 !!(mode->flags & DRM_MODE_FLAG_INTERLACE), mode->clock * 1000);
1243
1244 if (hdmi->bridge.next) {
1245 struct drm_display_mode adjusted_mode;
1246
1247 drm_mode_copy(&adjusted_mode, mode);
1248 if (!drm_bridge_mode_fixup(hdmi->bridge.next, mode,
1249 &adjusted_mode))
1250 return MODE_BAD;
1251 }
1252
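 /* The controller supports pixel clocks from 27 MHz up to 297 MHz. */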
1253 if (mode->clock < 27000)
1254 return MODE_CLOCK_LOW;
1255 if (mode->clock > 297000)
1256 return MODE_CLOCK_HIGH;
1257
1258 return drm_mode_validate_size(mode, 0x1fff, 0x1fff);
1259}
1260
1261static struct drm_encoder *mtk_hdmi_conn_best_enc(struct drm_connector *conn)
1262{
1263 struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
1264
1265 return hdmi->bridge.encoder;
1266}
1267
1268static const struct drm_connector_funcs mtk_hdmi_connector_funcs = {
1269 .dpms = drm_atomic_helper_connector_dpms,
1270 .detect = hdmi_conn_detect,
1271 .fill_modes = drm_helper_probe_single_connector_modes,
1272 .destroy = hdmi_conn_destroy,
1273 .reset = drm_atomic_helper_connector_reset,
1274 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
1275 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
1276};
1277
1278static const struct drm_connector_helper_funcs
1279 mtk_hdmi_connector_helper_funcs = {
1280 .get_modes = mtk_hdmi_conn_get_modes,
1281 .mode_valid = mtk_hdmi_conn_mode_valid,
1282 .best_encoder = mtk_hdmi_conn_best_enc,
1283};
1284
1285static void mtk_hdmi_hpd_event(bool hpd, struct device *dev)
1286{
1287 struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
1288
1289 if (hdmi && hdmi->bridge.encoder && hdmi->bridge.encoder->dev)
1290 drm_helper_hpd_irq_event(hdmi->bridge.encoder->dev);
1291}
1292
1293/*
1294 * Bridge callbacks
1295 */
1296
1297static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge)
1298{
1299 struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
1300 int ret;
1301
1302 ret = drm_connector_init(bridge->encoder->dev, &hdmi->conn,
1303 &mtk_hdmi_connector_funcs,
1304 DRM_MODE_CONNECTOR_HDMIA);
1305 if (ret) {
1306 dev_err(hdmi->dev, "Failed to initialize connector: %d\n", ret);
1307 return ret;
1308 }
1309 drm_connector_helper_add(&hdmi->conn, &mtk_hdmi_connector_helper_funcs);
1310
1311 hdmi->conn.polled = DRM_CONNECTOR_POLL_HPD;
1312 hdmi->conn.interlace_allowed = true;
1313 hdmi->conn.doublescan_allowed = false;
1314
1315 ret = drm_mode_connector_attach_encoder(&hdmi->conn,
1316 bridge->encoder);
1317 if (ret) {
1318 dev_err(hdmi->dev,
1319 "Failed to attach connector to encoder: %d\n", ret);
1320 return ret;
1321 }
1322
1323 if (bridge->next) {
1324 bridge->next->encoder = bridge->encoder;
1325 ret = drm_bridge_attach(bridge->encoder->dev, bridge->next);
1326 if (ret) {
1327 dev_err(hdmi->dev,
1328 "Failed to attach external bridge: %d\n", ret);
1329 return ret;
1330 }
1331 }
1332
1333 mtk_cec_set_hpd_event(hdmi->cec_dev, mtk_hdmi_hpd_event, hdmi->dev);
1334
1335 return 0;
1336}
1337
1338static bool mtk_hdmi_bridge_mode_fixup(struct drm_bridge *bridge,
1339 const struct drm_display_mode *mode,
1340 struct drm_display_mode *adjusted_mode)
1341{
1342 return true;
1343}
1344
1345static void mtk_hdmi_bridge_disable(struct drm_bridge *bridge)
1346{
1347 struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
1348
1349 if (!hdmi->enabled)
1350 return;
1351
1352 phy_power_off(hdmi->phy);
1353 clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]);
1354 clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]);
1355
1356 hdmi->enabled = false;
1357}
1358
1359static void mtk_hdmi_bridge_post_disable(struct drm_bridge *bridge)
1360{
1361 struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
1362
1363 if (!hdmi->powered)
1364 return;
1365
1366 mtk_hdmi_hw_1p4_version_enable(hdmi, true);
1367 mtk_hdmi_hw_make_reg_writable(hdmi, false);
1368
1369 hdmi->powered = false;
1370}
1371
1372static void mtk_hdmi_bridge_mode_set(struct drm_bridge *bridge,
1373 struct drm_display_mode *mode,
1374 struct drm_display_mode *adjusted_mode)
1375{
1376 struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
1377
1378 dev_dbg(hdmi->dev, "cur info: name:%s, hdisplay:%d\n",
1379 adjusted_mode->name, adjusted_mode->hdisplay);
1380 dev_dbg(hdmi->dev, "hsync_start:%d, hsync_end:%d, htotal:%d\n",
1381 adjusted_mode->hsync_start, adjusted_mode->hsync_end,
1382 adjusted_mode->htotal);
1383 dev_dbg(hdmi->dev, "hskew:%d, vdisplay:%d\n",
1384 adjusted_mode->hskew, adjusted_mode->vdisplay);
1385 dev_dbg(hdmi->dev, "vsync_start:%d, vsync_end:%d, vtotal:%d\n",
1386 adjusted_mode->vsync_start, adjusted_mode->vsync_end,
1387 adjusted_mode->vtotal);
1388 dev_dbg(hdmi->dev, "vscan:%d, flag:%d\n",
1389 adjusted_mode->vscan, adjusted_mode->flags);
1390
1391 drm_mode_copy(&hdmi->mode, adjusted_mode);
1392}
1393
1394static void mtk_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
1395{
1396 struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
1397
1398 mtk_hdmi_hw_make_reg_writable(hdmi, true);
1399 mtk_hdmi_hw_1p4_version_enable(hdmi, true);
1400
1401 hdmi->powered = true;
1402}
1403
1404static void mtk_hdmi_bridge_enable(struct drm_bridge *bridge)
1405{
1406 struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
1407
1408 mtk_hdmi_output_set_display_mode(hdmi, &hdmi->mode);
1409 clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]);
1410 clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]);
1411 phy_power_on(hdmi->phy);
1412
1413 hdmi->enabled = true;
1414}
1415
1416static const struct drm_bridge_funcs mtk_hdmi_bridge_funcs = {
1417 .attach = mtk_hdmi_bridge_attach,
1418 .mode_fixup = mtk_hdmi_bridge_mode_fixup,
1419 .disable = mtk_hdmi_bridge_disable,
1420 .post_disable = mtk_hdmi_bridge_post_disable,
1421 .mode_set = mtk_hdmi_bridge_mode_set,
1422 .pre_enable = mtk_hdmi_bridge_pre_enable,
1423 .enable = mtk_hdmi_bridge_enable,
1424};
1425
1426static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
1427 struct platform_device *pdev)
1428{
1429 struct device *dev = &pdev->dev;
1430 struct device_node *np = dev->of_node;
1431 struct device_node *cec_np, *port, *ep, *remote, *i2c_np;
1432 struct platform_device *cec_pdev;
1433 struct regmap *regmap;
1434 struct resource *mem;
1435 int ret;
1436
1437 ret = mtk_hdmi_get_all_clk(hdmi, np);
1438 if (ret) {
1439 dev_err(dev, "Failed to get clocks: %d\n", ret);
1440 return ret;
1441 }
1442
1443 /* The CEC module handles HDMI hotplug detection */
1444 cec_np = of_find_compatible_node(np->parent, NULL,
1445 "mediatek,mt8173-cec");
1446 if (!cec_np) {
1447 dev_err(dev, "Failed to find CEC node\n");
1448 return -EINVAL;
1449 }
1450
1451 cec_pdev = of_find_device_by_node(cec_np);
1452 if (!cec_pdev) {
1453 dev_err(hdmi->dev, "Waiting for CEC device %s\n",
1454 cec_np->full_name);
1455 return -EPROBE_DEFER;
1456 }
1457 hdmi->cec_dev = &cec_pdev->dev;
1458
1459 /*
1460 * The mediatek,syscon-hdmi property contains a phandle link to the
1461 * MMSYS_CONFIG device and the register offset of the HDMI_SYS_CFG
1462 * registers it contains.
1463 */
1464 regmap = syscon_regmap_lookup_by_phandle(np, "mediatek,syscon-hdmi");
1465 ret = of_property_read_u32_index(np, "mediatek,syscon-hdmi", 1,
1466 &hdmi->sys_offset);
1467 if (IS_ERR(regmap))
1468 ret = PTR_ERR(regmap);
1469 if (ret) {
1471 dev_err(dev,
1472 "Failed to get system configuration registers: %d\n",
1473 ret);
1474 return ret;
1475 }
1476 hdmi->sys_regmap = regmap;
1477
1478 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1479 hdmi->regs = devm_ioremap_resource(dev, mem);
1480 if (IS_ERR(hdmi->regs))
1481 return PTR_ERR(hdmi->regs);
1482
1483 port = of_graph_get_port_by_id(np, 1);
1484 if (!port) {
1485 dev_err(dev, "Missing output port node\n");
1486 return -EINVAL;
1487 }
1488
1489 ep = of_get_child_by_name(port, "endpoint");
1490 if (!ep) {
1491 dev_err(dev, "Missing endpoint node in port %s\n",
1492 port->full_name);
1493 of_node_put(port);
1494 return -EINVAL;
1495 }
1496 of_node_put(port);
1497
1498 remote = of_graph_get_remote_port_parent(ep);
1499 if (!remote) {
1500 dev_err(dev, "Missing connector/bridge node for endpoint %s\n",
1501 ep->full_name);
1502 of_node_put(ep);
1503 return -EINVAL;
1504 }
1505 of_node_put(ep);
1506
1507 if (!of_device_is_compatible(remote, "hdmi-connector")) {
1508 hdmi->bridge.next = of_drm_find_bridge(remote);
1509 if (!hdmi->bridge.next) {
1510 dev_err(dev, "Waiting for external bridge\n");
1511 of_node_put(remote);
1512 return -EPROBE_DEFER;
1513 }
1514 }
1515
1516 i2c_np = of_parse_phandle(remote, "ddc-i2c-bus", 0);
1517 if (!i2c_np) {
1518 dev_err(dev, "Failed to find ddc-i2c-bus node in %s\n",
1519 remote->full_name);
1520 of_node_put(remote);
1521 return -EINVAL;
1522 }
1523 of_node_put(remote);
1524
1525 hdmi->ddc_adpt = of_find_i2c_adapter_by_node(i2c_np);
1526 if (!hdmi->ddc_adpt) {
1527 dev_err(dev, "Failed to get ddc i2c adapter by node\n");
1528 return -EINVAL;
1529 }
1530
1531 return 0;
1532}
1533
1534/*
1535 * HDMI audio codec callbacks
1536 */
1537
1538static int mtk_hdmi_audio_hw_params(struct device *dev, void *data,
1539 struct hdmi_codec_daifmt *daifmt,
1540 struct hdmi_codec_params *params)
1541{
1542 struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
1543 struct hdmi_audio_param hdmi_params;
1544 unsigned int chan = params->cea.channels;
1545
1546 dev_dbg(hdmi->dev, "%s: %u Hz, %d bit, %u channels\n", __func__,
1547 params->sample_rate, params->sample_width, chan);
1548
1549 if (!hdmi->bridge.encoder)
1550 return -ENODEV;
1551
1552 switch (chan) {
1553 case 2:
1554 hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0;
1555 break;
1556 case 4:
1557 hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_4_0;
1558 break;
1559 case 6:
1560 hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_5_1;
1561 break;
1562 case 8:
1563 hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_7_1;
1564 break;
1565 default:
1566 dev_err(hdmi->dev, "%u channels not supported\n", chan);
1567 return -EINVAL;
1568 }
1569
1570 switch (params->sample_rate) {
1571 case 32000:
1572 case 44100:
1573 case 48000:
1574 case 88200:
1575 case 96000:
1576 case 176400:
1577 case 192000:
1578 break;
1579 default:
1580 dev_err(hdmi->dev, "sample rate %u not supported\n",
1581 params->sample_rate);
1582 return -EINVAL;
1583 }
1584
1585 switch (daifmt->fmt) {
1586 case HDMI_I2S:
1587 hdmi_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
1588 hdmi_params.aud_sampe_size = HDMI_AUDIO_SAMPLE_SIZE_16;
1589 hdmi_params.aud_input_type = HDMI_AUD_INPUT_I2S;
1590 hdmi_params.aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
1591 hdmi_params.aud_mclk = HDMI_AUD_MCLK_128FS;
1592 break;
1593 default:
1594 dev_err(hdmi->dev, "%s: Invalid DAI format %d\n", __func__,
1595 daifmt->fmt);
1596 return -EINVAL;
1597 }
1598
1599 memcpy(&hdmi_params.codec_params, params,
1600 sizeof(hdmi_params.codec_params));
1601
1602 mtk_hdmi_audio_set_param(hdmi, &hdmi_params);
1603
1604 return 0;
1605}
1606
1607static int mtk_hdmi_audio_startup(struct device *dev, void *data)
1608{
1609 struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
1610
1611 dev_dbg(dev, "%s\n", __func__);
1612
1613 mtk_hdmi_audio_enable(hdmi);
1614
1615 return 0;
1616}
1617
1618static void mtk_hdmi_audio_shutdown(struct device *dev, void *data)
1619{
1620 struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
1621
1622 dev_dbg(dev, "%s\n", __func__);
1623
1624 mtk_hdmi_audio_disable(hdmi);
1625}
1626
1627static int mtk_hdmi_audio_digital_mute(struct device *dev, void *data, bool enable)
1628{
1629 struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
1630
1631 dev_dbg(dev, "%s(%d)\n", __func__, enable);
1632
1633 if (enable)
1634 mtk_hdmi_hw_aud_mute(hdmi);
1635 else
1636 mtk_hdmi_hw_aud_unmute(hdmi);
1637
1638 return 0;
1639}
1640
1641static int mtk_hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf,
 size_t len)
1642{
1643 struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
1644
1645 dev_dbg(dev, "%s\n", __func__);
1646
1647 memcpy(buf, hdmi->conn.eld, min(sizeof(hdmi->conn.eld), len));
1648
1649 return 0;
1650}
1651
1652static const struct hdmi_codec_ops mtk_hdmi_audio_codec_ops = {
1653 .hw_params = mtk_hdmi_audio_hw_params,
1654 .audio_startup = mtk_hdmi_audio_startup,
1655 .audio_shutdown = mtk_hdmi_audio_shutdown,
1656 .digital_mute = mtk_hdmi_audio_digital_mute,
1657 .get_eld = mtk_hdmi_audio_get_eld,
1658};
1659
1660static void mtk_hdmi_register_audio_driver(struct device *dev)
1661{
1662 struct hdmi_codec_pdata codec_data = {
1663 .ops = &mtk_hdmi_audio_codec_ops,
1664 .max_i2s_channels = 2,
1665 .i2s = 1,
1666 };
1667 struct platform_device *pdev;
1668
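 /*
  * Register an hdmi-codec platform device; the ASoC framework binds to
  * it and calls back into this driver via mtk_hdmi_audio_codec_ops.
  */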
1669 pdev = platform_device_register_data(dev, HDMI_CODEC_DRV_NAME,
1670 PLATFORM_DEVID_AUTO, &codec_data,
1671 sizeof(codec_data));
1672 if (IS_ERR(pdev))
1673 return;
1674
1675 DRM_INFO("%s driver bound to HDMI\n", HDMI_CODEC_DRV_NAME);
1676}
1677
1678static int mtk_drm_hdmi_probe(struct platform_device *pdev)
1679{
1680 struct mtk_hdmi *hdmi;
1681 struct device *dev = &pdev->dev;
1682 int ret;
1683
1684 hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
1685 if (!hdmi)
1686 return -ENOMEM;
1687
1688 hdmi->dev = dev;
1689
1690 ret = mtk_hdmi_dt_parse_pdata(hdmi, pdev);
1691 if (ret)
1692 return ret;
1693
1694 hdmi->phy = devm_phy_get(dev, "hdmi");
1695 if (IS_ERR(hdmi->phy)) {
1696 ret = PTR_ERR(hdmi->phy);
1697 dev_err(dev, "Failed to get HDMI PHY: %d\n", ret);
1698 return ret;
1699 }
1700
1701 platform_set_drvdata(pdev, hdmi);
1702
1703 ret = mtk_hdmi_output_init(hdmi);
1704 if (ret) {
1705 dev_err(dev, "Failed to initialize hdmi output\n");
1706 return ret;
1707 }
1708
1709 mtk_hdmi_register_audio_driver(dev);
1710
1711 hdmi->bridge.funcs = &mtk_hdmi_bridge_funcs;
1712 hdmi->bridge.of_node = pdev->dev.of_node;
1713 ret = drm_bridge_add(&hdmi->bridge);
1714 if (ret) {
1715 dev_err(dev, "Failed to add bridge: %d\n", ret);
1716 return ret;
1717 }
1718
1719 ret = mtk_hdmi_clk_enable_audio(hdmi);
1720 if (ret) {
1721 dev_err(dev, "Failed to enable audio clocks: %d\n", ret);
1722 goto err_bridge_remove;
1723 }
1724
1725 dev_dbg(dev, "mediatek hdmi probe success\n");
1726 return 0;
1727
1728err_bridge_remove:
1729 drm_bridge_remove(&hdmi->bridge);
1730 return ret;
1731}
1732
1733static int mtk_drm_hdmi_remove(struct platform_device *pdev)
1734{
1735 struct mtk_hdmi *hdmi = platform_get_drvdata(pdev);
1736
1737 drm_bridge_remove(&hdmi->bridge);
1738 mtk_hdmi_clk_disable_audio(hdmi);
1739 return 0;
1740}
1741
1742#ifdef CONFIG_PM_SLEEP
1743static int mtk_hdmi_suspend(struct device *dev)
1744{
1745 struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
1746
1747 mtk_hdmi_clk_disable_audio(hdmi);
1748 dev_dbg(dev, "hdmi suspend success!\n");
1749 return 0;
1750}
1751
1752static int mtk_hdmi_resume(struct device *dev)
1753{
1754 struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
1755 int ret = 0;
1756
1757 ret = mtk_hdmi_clk_enable_audio(hdmi);
1758 if (ret) {
1759 dev_err(dev, "hdmi resume failed!\n");
1760 return ret;
1761 }
1762
1763 dev_dbg(dev, "hdmi resume success!\n");
1764 return 0;
1765}
1766#endif
1767static SIMPLE_DEV_PM_OPS(mtk_hdmi_pm_ops,
1768 mtk_hdmi_suspend, mtk_hdmi_resume);
1769
1770static const struct of_device_id mtk_drm_hdmi_of_ids[] = {
1771 { .compatible = "mediatek,mt8173-hdmi", },
1772 {}
1773};
1774
1775static struct platform_driver mtk_hdmi_driver = {
1776 .probe = mtk_drm_hdmi_probe,
1777 .remove = mtk_drm_hdmi_remove,
1778 .driver = {
1779 .name = "mediatek-drm-hdmi",
1780 .of_match_table = mtk_drm_hdmi_of_ids,
1781 .pm = &mtk_hdmi_pm_ops,
1782 },
1783};
1784
1785static struct platform_driver * const mtk_hdmi_drivers[] = {
1786 &mtk_hdmi_phy_driver,
1787 &mtk_hdmi_ddc_driver,
1788 &mtk_cec_driver,
1789 &mtk_hdmi_driver,
1790};
1791
1792static int __init mtk_hdmitx_init(void)
1793{
1794 int ret;
1795 int i;
1796
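 /*
  * Register the PHY, DDC, CEC and HDMI bridge drivers in order; on
  * failure, the already-registered drivers are unwound in reverse.
  */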
1797 for (i = 0; i < ARRAY_SIZE(mtk_hdmi_drivers); i++) {
1798 ret = platform_driver_register(mtk_hdmi_drivers[i]);
1799 if (ret < 0) {
1800 pr_err("Failed to register %s driver: %d\n",
1801 mtk_hdmi_drivers[i]->driver.name, ret);
1802 goto err;
1803 }
1804 }
1805
1806 return 0;
1807
1808err:
1809 while (--i >= 0)
1810 platform_driver_unregister(mtk_hdmi_drivers[i]);
1811
1812 return ret;
1813}
1814
1815static void __exit mtk_hdmitx_exit(void)
1816{
1817 int i;
1818
1819 for (i = ARRAY_SIZE(mtk_hdmi_drivers) - 1; i >= 0; i--)
1820 platform_driver_unregister(mtk_hdmi_drivers[i]);
1821}
1822
1823module_init(mtk_hdmitx_init);
1824module_exit(mtk_hdmitx_exit);
1825
1826MODULE_AUTHOR("Jie Qiu <jie.qiu@mediatek.com>");
1827MODULE_DESCRIPTION("MediaTek HDMI Driver");
1828MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.h b/drivers/gpu/drm/mediatek/mtk_hdmi.h
new file mode 100644
index 000000000000..6371b3de1ff6
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.h
@@ -0,0 +1,23 @@
1/*
2 * Copyright (c) 2014 MediaTek Inc.
3 * Author: Jie Qiu <jie.qiu@mediatek.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14#ifndef _MTK_HDMI_CTRL_H
15#define _MTK_HDMI_CTRL_H
16
17struct platform_driver;
18
19extern struct platform_driver mtk_cec_driver;
20extern struct platform_driver mtk_hdmi_ddc_driver;
21extern struct platform_driver mtk_hdmi_phy_driver;
22
23#endif /* _MTK_HDMI_CTRL_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
new file mode 100644
index 000000000000..33c9e1bdb114
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
@@ -0,0 +1,358 @@
1/*
2 * Copyright (c) 2014 MediaTek Inc.
3 * Author: Jie Qiu <jie.qiu@mediatek.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/i2c.h>
17#include <linux/time.h>
18#include <linux/delay.h>
19#include <linux/errno.h>
20#include <linux/err.h>
21#include <linux/platform_device.h>
22#include <linux/clk.h>
23#include <linux/slab.h>
24#include <linux/io.h>
25#include <linux/iopoll.h>
26#include <linux/of_address.h>
27#include <linux/of_irq.h>
28#include <linux/of_platform.h>
29
30#define SIF1_CLOCK (288)
31#define DDC_DDCMCTL0 (0x0)
32#define DDCM_ODRAIN BIT(31)
33#define DDCM_CLK_DIV_OFFSET (16)
34#define DDCM_CLK_DIV_MASK (0xfff << 16)
35#define DDCM_CS_STATUS BIT(4)
36#define DDCM_SCL_STATE BIT(3)
37#define DDCM_SDA_STATE BIT(2)
38#define DDCM_SM0EN BIT(1)
39#define DDCM_SCL_STRETCH BIT(0)
40#define DDC_DDCMCTL1 (0x4)
41#define DDCM_ACK_OFFSET (16)
42#define DDCM_ACK_MASK (0xff << 16)
43#define DDCM_PGLEN_OFFSET (8)
44#define DDCM_PGLEN_MASK (0x7 << 8)
45#define DDCM_SIF_MODE_OFFSET (4)
46#define DDCM_SIF_MODE_MASK (0x7 << 4)
47#define DDCM_START (0x1)
48#define DDCM_WRITE_DATA (0x2)
49#define DDCM_STOP (0x3)
50#define DDCM_READ_DATA_NO_ACK (0x4)
51#define DDCM_READ_DATA_ACK (0x5)
52#define DDCM_TRI BIT(0)
53#define DDC_DDCMD0 (0x8)
54#define DDCM_DATA3 (0xff << 24)
55#define DDCM_DATA2 (0xff << 16)
56#define DDCM_DATA1 (0xff << 8)
57#define DDCM_DATA0 (0xff << 0)
58#define DDC_DDCMD1 (0xc)
59#define DDCM_DATA7 (0xff << 24)
60#define DDCM_DATA6 (0xff << 16)
61#define DDCM_DATA5 (0xff << 8)
62#define DDCM_DATA4 (0xff << 0)
63
64struct mtk_hdmi_ddc {
65 struct i2c_adapter adap;
66 struct clk *clk;
67 void __iomem *regs;
68};
69
70static inline void sif_set_bit(struct mtk_hdmi_ddc *ddc, unsigned int offset,
71 unsigned int val)
72{
73 writel(readl(ddc->regs + offset) | val, ddc->regs + offset);
74}
75
76static inline void sif_clr_bit(struct mtk_hdmi_ddc *ddc, unsigned int offset,
77 unsigned int val)
78{
79 writel(readl(ddc->regs + offset) & ~val, ddc->regs + offset);
80}
81
82static inline bool sif_bit_is_set(struct mtk_hdmi_ddc *ddc, unsigned int offset,
83 unsigned int val)
84{
85 return (readl(ddc->regs + offset) & val) == val;
86}
87
88static inline void sif_write_mask(struct mtk_hdmi_ddc *ddc, unsigned int offset,
89 unsigned int mask, unsigned int shift,
90 unsigned int val)
91{
92 unsigned int tmp;
93
94 tmp = readl(ddc->regs + offset);
95 tmp &= ~mask;
96 tmp |= (val << shift) & mask;
97 writel(tmp, ddc->regs + offset);
98}
99
100static inline unsigned int sif_read_mask(struct mtk_hdmi_ddc *ddc,
101 unsigned int offset, unsigned int mask,
102 unsigned int shift)
103{
104 return (readl(ddc->regs + offset) & mask) >> shift;
105}
106
107static void ddcm_trigger_mode(struct mtk_hdmi_ddc *ddc, int mode)
108{
109 u32 val;
110
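 /*
  * Kick off the transfer and wait up to 20 ms for the hardware to clear
  * the trigger bit again.
  */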
111 sif_write_mask(ddc, DDC_DDCMCTL1, DDCM_SIF_MODE_MASK,
112 DDCM_SIF_MODE_OFFSET, mode);
113 sif_set_bit(ddc, DDC_DDCMCTL1, DDCM_TRI);
114 readl_poll_timeout(ddc->regs + DDC_DDCMCTL1, val,
115 (val & DDCM_TRI) != DDCM_TRI, 4, 20000);
116}
117
118static int mtk_hdmi_ddc_read_msg(struct mtk_hdmi_ddc *ddc, struct i2c_msg *msg)
119{
120 struct device *dev = ddc->adap.dev.parent;
121 u32 remain_count, ack_count, ack_final, read_count, temp_count;
122 u32 index = 0;
123 u32 ack;
124 int i;
125
126 ddcm_trigger_mode(ddc, DDCM_START);
127 sif_write_mask(ddc, DDC_DDCMD0, 0xff, 0, (msg->addr << 1) | 0x01);
128 sif_write_mask(ddc, DDC_DDCMCTL1, DDCM_PGLEN_MASK, DDCM_PGLEN_OFFSET,
129 0x00);
130 ddcm_trigger_mode(ddc, DDCM_WRITE_DATA);
131 ack = sif_read_mask(ddc, DDC_DDCMCTL1, DDCM_ACK_MASK, DDCM_ACK_OFFSET);
132 dev_dbg(dev, "ack = 0x%x\n", ack);
133 if (ack != 0x01) {
134 dev_err(dev, "DDC read address not acked\n");
135 return -ENXIO;
136 }
137
138 remain_count = msg->len;
139 ack_count = (msg->len - 1) / 8;
140 ack_final = 0;
141
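 /*
  * The DDC master reads at most eight bytes per trigger; every group of
  * bytes except the last is ACKed so the device keeps sending.
  */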
142 while (remain_count > 0) {
143 if (ack_count > 0) {
144 read_count = 8;
145 ack_final = 0;
146 ack_count--;
147 } else {
148 read_count = remain_count;
149 ack_final = 1;
150 }
151
152 sif_write_mask(ddc, DDC_DDCMCTL1, DDCM_PGLEN_MASK,
153 DDCM_PGLEN_OFFSET, read_count - 1);
154 ddcm_trigger_mode(ddc, (ack_final == 1) ?
155 DDCM_READ_DATA_NO_ACK :
156 DDCM_READ_DATA_ACK);
157
158 ack = sif_read_mask(ddc, DDC_DDCMCTL1, DDCM_ACK_MASK,
159 DDCM_ACK_OFFSET);
160 temp_count = 0;
161 while (((ack & (1 << temp_count)) != 0) && (temp_count < 8))
162 temp_count++;
163 if (((ack_final == 1) && (temp_count != (read_count - 1))) ||
164 ((ack_final == 0) && (temp_count != read_count))) {
165 dev_err(dev, "Data NACK! ACK(0x%x)\n", ack);
166 break;
167 }
168
169 for (i = read_count; i >= 1; i--) {
170 int shift;
171 int offset;
172
173 if (i > 4) {
174 offset = DDC_DDCMD1;
175 shift = (i - 5) * 8;
176 } else {
177 offset = DDC_DDCMD0;
178 shift = (i - 1) * 8;
179 }
180
181 msg->buf[index + i - 1] = sif_read_mask(ddc, offset,
182 0xff << shift,
183 shift);
184 }
185
186 remain_count -= read_count;
187 index += read_count;
188 }
189
190 return 0;
191}
192
193static int mtk_hdmi_ddc_write_msg(struct mtk_hdmi_ddc *ddc, struct i2c_msg *msg)
194{
195 struct device *dev = ddc->adap.dev.parent;
196 u32 ack;
197
198 ddcm_trigger_mode(ddc, DDCM_START);
199 sif_write_mask(ddc, DDC_DDCMD0, DDCM_DATA0, 0, msg->addr << 1);
200 sif_write_mask(ddc, DDC_DDCMD0, DDCM_DATA1, 8, msg->buf[0]);
201 sif_write_mask(ddc, DDC_DDCMCTL1, DDCM_PGLEN_MASK, DDCM_PGLEN_OFFSET,
202 0x1);
203 ddcm_trigger_mode(ddc, DDCM_WRITE_DATA);
204
205 ack = sif_read_mask(ddc, DDC_DDCMCTL1, DDCM_ACK_MASK, DDCM_ACK_OFFSET);
206 dev_dbg(dev, "ack = 0x%x\n", ack);
207
208 if (ack != 0x03) {
209 dev_err(dev, "DDC write not acked (ack = 0x%x)\n", ack);
210 return -EIO;
211 }
212
213 return 0;
214}
215
216static int mtk_hdmi_ddc_xfer(struct i2c_adapter *adapter,
217 struct i2c_msg *msgs, int num)
218{
219 struct mtk_hdmi_ddc *ddc = adapter->algo_data;
220 struct device *dev = adapter->dev.parent;
221 int ret;
222 int i;
223
224 if (!ddc) {
225 dev_err(dev, "invalid arguments\n");
226 return -EINVAL;
227 }
228
229 sif_set_bit(ddc, DDC_DDCMCTL0, DDCM_SCL_STRETCH);
230 sif_set_bit(ddc, DDC_DDCMCTL0, DDCM_SM0EN);
231 sif_clr_bit(ddc, DDC_DDCMCTL0, DDCM_ODRAIN);
232
233 if (sif_bit_is_set(ddc, DDC_DDCMCTL1, DDCM_TRI)) {
234 dev_err(dev, "ddc line is busy!\n");
235 return -EBUSY;
236 }
237
238 sif_write_mask(ddc, DDC_DDCMCTL0, DDCM_CLK_DIV_MASK,
239 DDCM_CLK_DIV_OFFSET, SIF1_CLOCK);
240
241 for (i = 0; i < num; i++) {
242 struct i2c_msg *msg = &msgs[i];
243
244 dev_dbg(dev, "i2c msg, adr:0x%x, flags:%d, len :0x%x\n",
245 msg->addr, msg->flags, msg->len);
246
247 if (msg->flags & I2C_M_RD)
248 ret = mtk_hdmi_ddc_read_msg(ddc, msg);
249 else
250 ret = mtk_hdmi_ddc_write_msg(ddc, msg);
251 if (ret < 0)
252 goto xfer_end;
253 }
254
255 ddcm_trigger_mode(ddc, DDCM_STOP);
256
257 return i;
258
259xfer_end:
260 ddcm_trigger_mode(ddc, DDCM_STOP);
261 dev_err(dev, "DDC transfer failed: %d\n", ret);
262 return ret;
263}
264
265static u32 mtk_hdmi_ddc_func(struct i2c_adapter *adapter)
266{
267 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
268}
269
270static const struct i2c_algorithm mtk_hdmi_ddc_algorithm = {
271 .master_xfer = mtk_hdmi_ddc_xfer,
272 .functionality = mtk_hdmi_ddc_func,
273};
274
275static int mtk_hdmi_ddc_probe(struct platform_device *pdev)
276{
277 struct device *dev = &pdev->dev;
278 struct mtk_hdmi_ddc *ddc;
279 struct resource *mem;
280 int ret;
281
282 ddc = devm_kzalloc(dev, sizeof(struct mtk_hdmi_ddc), GFP_KERNEL);
283 if (!ddc)
284 return -ENOMEM;
285
286 ddc->clk = devm_clk_get(dev, "ddc-i2c");
287 if (IS_ERR(ddc->clk)) {
288 dev_err(dev, "Failed to get ddc-i2c clock: %ld\n", PTR_ERR(ddc->clk));
289 return PTR_ERR(ddc->clk);
290 }
291
292 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
293 ddc->regs = devm_ioremap_resource(&pdev->dev, mem);
294 if (IS_ERR(ddc->regs))
295 return PTR_ERR(ddc->regs);
296
297 ret = clk_prepare_enable(ddc->clk);
298 if (ret) {
299 dev_err(dev, "Failed to enable ddc-i2c clock: %d\n", ret);
300 return ret;
301 }
302
303 strlcpy(ddc->adap.name, "mediatek-hdmi-ddc", sizeof(ddc->adap.name));
304 ddc->adap.owner = THIS_MODULE;
305 ddc->adap.class = I2C_CLASS_DDC;
306 ddc->adap.algo = &mtk_hdmi_ddc_algorithm;
307 ddc->adap.retries = 3;
308 ddc->adap.dev.of_node = dev->of_node;
309 ddc->adap.algo_data = ddc;
310 ddc->adap.dev.parent = &pdev->dev;
311
312 ret = i2c_add_adapter(&ddc->adap);
313 if (ret < 0) {
314 dev_err(dev, "failed to add bus to i2c core\n");
315 goto err_clk_disable;
316 }
317
318 platform_set_drvdata(pdev, ddc);
319
320 dev_dbg(dev, "ddc->adap: %p\n", &ddc->adap);
321 dev_dbg(dev, "ddc->clk: %p\n", ddc->clk);
322 dev_dbg(dev, "physical adr: %pa, end: %pa\n", &mem->start,
323 &mem->end);
324
325 return 0;
326
327err_clk_disable:
328 clk_disable_unprepare(ddc->clk);
329 return ret;
330}
331
332static int mtk_hdmi_ddc_remove(struct platform_device *pdev)
333{
334 struct mtk_hdmi_ddc *ddc = platform_get_drvdata(pdev);
335
336 i2c_del_adapter(&ddc->adap);
337 clk_disable_unprepare(ddc->clk);
338
339 return 0;
340}
341
342static const struct of_device_id mtk_hdmi_ddc_match[] = {
343 { .compatible = "mediatek,mt8173-hdmi-ddc", },
344 {},
345};
346
347struct platform_driver mtk_hdmi_ddc_driver = {
348 .probe = mtk_hdmi_ddc_probe,
349 .remove = mtk_hdmi_ddc_remove,
350 .driver = {
351 .name = "mediatek-hdmi-ddc",
352 .of_match_table = mtk_hdmi_ddc_match,
353 },
354};
355
356MODULE_AUTHOR("Jie Qiu <jie.qiu@mediatek.com>");
357MODULE_DESCRIPTION("MediaTek HDMI DDC Driver");
358MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_regs.h b/drivers/gpu/drm/mediatek/mtk_hdmi_regs.h
new file mode 100644
index 000000000000..a5cb07d12c9c
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_regs.h
@@ -0,0 +1,238 @@
1/*
2 * Copyright (c) 2014 MediaTek Inc.
3 * Author: Jie Qiu <jie.qiu@mediatek.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14#ifndef _MTK_HDMI_REGS_H
15#define _MTK_HDMI_REGS_H
16
17#define GRL_INT_MASK 0x18
18#define GRL_IFM_PORT 0x188
19#define GRL_CH_SWAP 0x198
20#define LR_SWAP BIT(0)
21#define LFE_CC_SWAP BIT(1)
22#define LSRS_SWAP BIT(2)
23#define RLS_RRS_SWAP BIT(3)
24#define LR_STATUS_SWAP BIT(4)
25#define GRL_I2S_C_STA0 0x140
26#define GRL_I2S_C_STA1 0x144
27#define GRL_I2S_C_STA2 0x148
28#define GRL_I2S_C_STA3 0x14C
29#define GRL_I2S_C_STA4 0x150
30#define GRL_I2S_UV 0x154
31#define I2S_UV_V BIT(0)
32#define I2S_UV_U BIT(1)
33#define I2S_UV_CH_EN_MASK 0x3c
34#define I2S_UV_CH_EN(x) BIT((x) + 2)
35#define I2S_UV_TMDS_DEBUG BIT(6)
36#define I2S_UV_NORMAL_INFO_INV BIT(7)
37#define GRL_ACP_ISRC_CTRL 0x158
38#define VS_EN BIT(0)
39#define ACP_EN BIT(1)
40#define ISRC1_EN BIT(2)
41#define ISRC2_EN BIT(3)
42#define GAMUT_EN BIT(4)
43#define GRL_CTS_CTRL 0x160
44#define CTS_CTRL_SOFT BIT(0)
45#define GRL_INT 0x14
46#define INT_MDI BIT(0)
47#define INT_HDCP BIT(1)
48#define INT_FIFO_O BIT(2)
49#define INT_FIFO_U BIT(3)
50#define INT_IFM_ERR BIT(4)
51#define INT_INF_DONE BIT(5)
52#define INT_NCTS_DONE BIT(6)
53#define INT_CTRL_PKT_DONE BIT(7)
55#define GRL_CTRL 0x1C
56#define CTRL_GEN_EN BIT(2)
57#define CTRL_SPD_EN BIT(3)
58#define CTRL_MPEG_EN BIT(4)
59#define CTRL_AUDIO_EN BIT(5)
60#define CTRL_AVI_EN BIT(6)
61#define CTRL_AVMUTE BIT(7)
62#define GRL_STATUS 0x20
63#define STATUS_HTPLG BIT(0)
64#define STATUS_PORD BIT(1)
65#define GRL_DIVN 0x170
66#define NCTS_WRI_ANYTIME BIT(6)
67#define GRL_AUDIO_CFG 0x17C
68#define AUDIO_ZERO BIT(0)
69#define HIGH_BIT_RATE BIT(1)
70#define SACD_DST BIT(2)
71#define DST_NORMAL_DOUBLE BIT(3)
72#define DSD_INV BIT(4)
73#define LR_INV BIT(5)
74#define LR_MIX BIT(6)
75#define DSD_SEL BIT(7)
76#define GRL_NCTS 0x184
77#define GRL_CH_SW0 0x18C
78#define GRL_CH_SW1 0x190
79#define GRL_CH_SW2 0x194
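/* Each 3-bit output slot 'to' selects the audio source channel 'from'. */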
80#define CH_SWITCH(from, to) ((from) << ((to) * 3))
81#define GRL_INFOFRM_VER 0x19C
82#define GRL_INFOFRM_TYPE 0x1A0
83#define GRL_INFOFRM_LNG 0x1A4
84#define GRL_MIX_CTRL 0x1B4
85#define MIX_CTRL_SRC_EN BIT(0)
86#define BYPASS_VOLUME BIT(1)
87#define MIX_CTRL_FLAT BIT(7)
88#define GRL_AOUT_CFG 0x1C4
89#define AOUT_BNUM_SEL_MASK 0x03
90#define AOUT_24BIT 0x00
91#define AOUT_20BIT 0x02
92#define AOUT_16BIT 0x03
93#define AOUT_FIFO_ADAP_CTRL BIT(6)
94#define AOUT_BURST_PREAMBLE_EN BIT(7)
95#define HIGH_BIT_RATE_PACKET_ALIGN (AOUT_BURST_PREAMBLE_EN | \
96 AOUT_FIFO_ADAP_CTRL)
97#define GRL_SHIFT_L1 0x1C0
98#define GRL_SHIFT_R2 0x1B0
99#define AUDIO_PACKET_OFF BIT(6)
100#define GRL_CFG0 0x24
101#define CFG0_I2S_MODE_MASK 0x3
102#define CFG0_I2S_MODE_RTJ 0x1
103#define CFG0_I2S_MODE_LTJ 0x0
104#define CFG0_I2S_MODE_I2S 0x2
105#define CFG0_W_LENGTH_MASK 0x30
106#define CFG0_W_LENGTH_24BIT 0x00
107#define CFG0_W_LENGTH_16BIT 0x10
108#define GRL_CFG1 0x28
109#define CFG1_EDG_SEL BIT(0)
110#define CFG1_SPDIF BIT(1)
111#define CFG1_DVI BIT(2)
112#define CFG1_HDCP_DEBUG BIT(3)
113#define GRL_CFG2 0x2c
114#define CFG2_MHL_DE_SEL BIT(3)
115#define CFG2_MHL_FAKE_DE_SEL BIT(4)
116#define CFG2_MHL_DATA_REMAP BIT(5)
117#define CFG2_NOTICE_EN BIT(6)
118#define CFG2_ACLK_INV BIT(7)
119#define GRL_CFG3 0x30
120#define CFG3_AES_KEY_INDEX_MASK 0x3f
121#define CFG3_CONTROL_PACKET_DELAY BIT(6)
122#define CFG3_KSV_LOAD_START BIT(7)
123#define GRL_CFG4 0x34
124#define CFG4_AES_KEY_LOAD BIT(4)
125#define CFG4_AV_UNMUTE_EN BIT(5)
126#define CFG4_AV_UNMUTE_SET BIT(6)
127#define CFG4_MHL_MODE BIT(7)
128#define GRL_CFG5 0x38
129#define CFG5_CD_RATIO_MASK 0x8F
130#define CFG5_FS128 (0x1 << 4)
131#define CFG5_FS256 (0x2 << 4)
132#define CFG5_FS384 (0x3 << 4)
133#define CFG5_FS512 (0x4 << 4)
134#define CFG5_FS768 (0x6 << 4)
135#define DUMMY_304 0x304
136#define CHMO_SEL (0x3 << 2)
137#define CHM1_SEL (0x3 << 4)
138#define CHM2_SEL (0x3 << 6)
139#define AUDIO_I2S_NCTS_SEL BIT(1)
140#define AUDIO_I2S_NCTS_SEL_64 (1 << 1)
141#define AUDIO_I2S_NCTS_SEL_128 (0 << 1)
142#define NEW_GCP_CTRL BIT(0)
143#define NEW_GCP_CTRL_MERGE BIT(0)
144#define GRL_L_STATUS_0 0x200
145#define GRL_L_STATUS_1 0x204
146#define GRL_L_STATUS_2 0x208
147#define GRL_L_STATUS_3 0x20c
148#define GRL_L_STATUS_4 0x210
149#define GRL_L_STATUS_5 0x214
150#define GRL_L_STATUS_6 0x218
151#define GRL_L_STATUS_7 0x21c
152#define GRL_L_STATUS_8 0x220
153#define GRL_L_STATUS_9 0x224
154#define GRL_L_STATUS_10 0x228
155#define GRL_L_STATUS_11 0x22c
156#define GRL_L_STATUS_12 0x230
157#define GRL_L_STATUS_13 0x234
158#define GRL_L_STATUS_14 0x238
159#define GRL_L_STATUS_15 0x23c
160#define GRL_L_STATUS_16 0x240
161#define GRL_L_STATUS_17 0x244
162#define GRL_L_STATUS_18 0x248
163#define GRL_L_STATUS_19 0x24c
164#define GRL_L_STATUS_20 0x250
165#define GRL_L_STATUS_21 0x254
166#define GRL_L_STATUS_22 0x258
167#define GRL_L_STATUS_23 0x25c
168#define GRL_R_STATUS_0 0x260
169#define GRL_R_STATUS_1 0x264
170#define GRL_R_STATUS_2 0x268
171#define GRL_R_STATUS_3 0x26c
172#define GRL_R_STATUS_4 0x270
173#define GRL_R_STATUS_5 0x274
174#define GRL_R_STATUS_6 0x278
175#define GRL_R_STATUS_7 0x27c
176#define GRL_R_STATUS_8 0x280
177#define GRL_R_STATUS_9 0x284
178#define GRL_R_STATUS_10 0x288
179#define GRL_R_STATUS_11 0x28c
180#define GRL_R_STATUS_12 0x290
181#define GRL_R_STATUS_13 0x294
182#define GRL_R_STATUS_14 0x298
183#define GRL_R_STATUS_15 0x29c
184#define GRL_R_STATUS_16 0x2a0
185#define GRL_R_STATUS_17 0x2a4
186#define GRL_R_STATUS_18 0x2a8
187#define GRL_R_STATUS_19 0x2ac
188#define GRL_R_STATUS_20 0x2b0
189#define GRL_R_STATUS_21 0x2b4
190#define GRL_R_STATUS_22 0x2b8
191#define GRL_R_STATUS_23 0x2bc
192#define GRL_ABIST_CTRL0 0x2D4
193#define GRL_ABIST_CTRL1 0x2D8
194#define ABIST_EN BIT(7)
195#define ABIST_DATA_FMT (0x7 << 0)
196#define VIDEO_CFG_0 0x380
197#define VIDEO_CFG_1 0x384
198#define VIDEO_CFG_2 0x388
199#define VIDEO_CFG_3 0x38c
200#define VIDEO_CFG_4 0x390
201#define VIDEO_SOURCE_SEL BIT(7)
202#define NORMAL_PATH (1 << 7)
203#define GEN_RGB (0 << 7)
204
205#define HDMI_SYS_CFG1C 0x000
206#define HDMI_ON BIT(0)
207#define HDMI_RST BIT(1)
208#define ANLG_ON BIT(2)
209#define CFG10_DVI BIT(3)
210#define HDMI_TST BIT(3)
211#define SYS_KEYMASK1 (0xff << 8)
212#define SYS_KEYMASK2 (0xff << 16)
213#define AUD_OUTSYNC_EN BIT(24)
214#define AUD_OUTSYNC_PRE_EN BIT(25)
215#define I2CM_ON BIT(26)
216#define E2PROM_TYPE_8BIT BIT(27)
217#define MCM_E2PROM_ON BIT(28)
218#define EXT_E2PROM_ON BIT(29)
219#define HTPLG_PIN_SEL_OFF BIT(30)
220#define AES_EFUSE_ENABLE BIT(31)
221#define HDMI_SYS_CFG20 0x004
222#define DEEP_COLOR_MODE_MASK (3 << 1)
223#define COLOR_8BIT_MODE (0 << 1)
224#define COLOR_10BIT_MODE (1 << 1)
225#define COLOR_12BIT_MODE (2 << 1)
226#define COLOR_16BIT_MODE (3 << 1)
227#define DEEP_COLOR_EN BIT(0)
228#define HDMI_AUDIO_TEST_SEL BIT(8)
229#define HDMI2P0_EN BIT(11)
230#define HDMI_OUT_FIFO_EN BIT(16)
231#define HDMI_OUT_FIFO_CLK_INV BIT(17)
232#define MHL_MODE_ON BIT(28)
233#define MHL_PP_MODE BIT(29)
234#define MHL_SYNC_AUTO_EN BIT(30)
235#define HDMI_PCLK_FREE_RUN BIT(31)
236
237#define MTK_SIP_SET_AUTHORIZED_SECURE_REG 0x82000001
238#endif
diff --git a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
new file mode 100644
index 000000000000..8a24754b440f
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
@@ -0,0 +1,515 @@
1/*
2 * Copyright (c) 2014 MediaTek Inc.
3 * Author: Jie Qiu <jie.qiu@mediatek.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#include <linux/clk.h>
16#include <linux/clk-provider.h>
17#include <linux/delay.h>
18#include <linux/io.h>
19#include <linux/mfd/syscon.h>
20#include <linux/module.h>
21#include <linux/phy/phy.h>
22#include <linux/platform_device.h>
23#include <linux/types.h>
24
25#define HDMI_CON0 0x00
26#define RG_HDMITX_PLL_EN BIT(31)
27#define RG_HDMITX_PLL_FBKDIV (0x7f << 24)
28#define PLL_FBKDIV_SHIFT 24
29#define RG_HDMITX_PLL_FBKSEL (0x3 << 22)
30#define PLL_FBKSEL_SHIFT 22
31#define RG_HDMITX_PLL_PREDIV (0x3 << 20)
32#define PREDIV_SHIFT 20
33#define RG_HDMITX_PLL_POSDIV (0x3 << 18)
34#define POSDIV_SHIFT 18
35#define RG_HDMITX_PLL_RST_DLY (0x3 << 16)
36#define RG_HDMITX_PLL_IR (0xf << 12)
37#define PLL_IR_SHIFT 12
38#define RG_HDMITX_PLL_IC (0xf << 8)
39#define PLL_IC_SHIFT 8
40#define RG_HDMITX_PLL_BP (0xf << 4)
41#define PLL_BP_SHIFT 4
42#define RG_HDMITX_PLL_BR (0x3 << 2)
43#define PLL_BR_SHIFT 2
44#define RG_HDMITX_PLL_BC (0x3 << 0)
45#define PLL_BC_SHIFT 0
46#define HDMI_CON1 0x04
47#define RG_HDMITX_PLL_DIVEN (0x7 << 29)
48#define PLL_DIVEN_SHIFT 29
49#define RG_HDMITX_PLL_AUTOK_EN BIT(28)
50#define RG_HDMITX_PLL_AUTOK_KF (0x3 << 26)
51#define RG_HDMITX_PLL_AUTOK_KS (0x3 << 24)
52#define RG_HDMITX_PLL_AUTOK_LOAD BIT(23)
53#define RG_HDMITX_PLL_BAND (0x3f << 16)
54#define RG_HDMITX_PLL_REF_SEL BIT(15)
55#define RG_HDMITX_PLL_BIAS_EN BIT(14)
56#define RG_HDMITX_PLL_BIAS_LPF_EN BIT(13)
57#define RG_HDMITX_PLL_TXDIV_EN BIT(12)
58#define RG_HDMITX_PLL_TXDIV (0x3 << 10)
59#define PLL_TXDIV_SHIFT 10
60#define RG_HDMITX_PLL_LVROD_EN BIT(9)
61#define RG_HDMITX_PLL_MONVC_EN BIT(8)
62#define RG_HDMITX_PLL_MONCK_EN BIT(7)
63#define RG_HDMITX_PLL_MONREF_EN BIT(6)
64#define RG_HDMITX_PLL_TST_EN BIT(5)
65#define RG_HDMITX_PLL_TST_CK_EN BIT(4)
66#define RG_HDMITX_PLL_TST_SEL (0xf << 0)
67#define HDMI_CON2 0x08
68#define RGS_HDMITX_PLL_AUTOK_BAND (0x7f << 8)
69#define RGS_HDMITX_PLL_AUTOK_FAIL BIT(1)
70#define RG_HDMITX_EN_TX_CKLDO BIT(0)
71#define HDMI_CON3 0x0c
72#define RG_HDMITX_SER_EN (0xf << 28)
73#define RG_HDMITX_PRD_EN (0xf << 24)
74#define RG_HDMITX_PRD_IMP_EN (0xf << 20)
75#define RG_HDMITX_DRV_EN (0xf << 16)
76#define RG_HDMITX_DRV_IMP_EN (0xf << 12)
77#define DRV_IMP_EN_SHIFT 12
78#define RG_HDMITX_MHLCK_FORCE BIT(10)
79#define RG_HDMITX_MHLCK_PPIX_EN BIT(9)
80#define RG_HDMITX_MHLCK_EN BIT(8)
81#define RG_HDMITX_SER_DIN_SEL (0xf << 4)
82#define RG_HDMITX_SER_5T1_BIST_EN BIT(3)
83#define RG_HDMITX_SER_BIST_TOG BIT(2)
84#define RG_HDMITX_SER_DIN_TOG BIT(1)
85#define RG_HDMITX_SER_CLKDIG_INV BIT(0)
86#define HDMI_CON4 0x10
87#define RG_HDMITX_PRD_IBIAS_CLK (0xf << 24)
88#define RG_HDMITX_PRD_IBIAS_D2 (0xf << 16)
89#define RG_HDMITX_PRD_IBIAS_D1 (0xf << 8)
90#define RG_HDMITX_PRD_IBIAS_D0 (0xf << 0)
91#define PRD_IBIAS_CLK_SHIFT 24
92#define PRD_IBIAS_D2_SHIFT 16
93#define PRD_IBIAS_D1_SHIFT 8
94#define PRD_IBIAS_D0_SHIFT 0
95#define HDMI_CON5 0x14
96#define RG_HDMITX_DRV_IBIAS_CLK (0x3f << 24)
97#define RG_HDMITX_DRV_IBIAS_D2 (0x3f << 16)
98#define RG_HDMITX_DRV_IBIAS_D1 (0x3f << 8)
99#define RG_HDMITX_DRV_IBIAS_D0 (0x3f << 0)
100#define DRV_IBIAS_CLK_SHIFT 24
101#define DRV_IBIAS_D2_SHIFT 16
102#define DRV_IBIAS_D1_SHIFT 8
103#define DRV_IBIAS_D0_SHIFT 0
104#define HDMI_CON6 0x18
105#define RG_HDMITX_DRV_IMP_CLK (0x3f << 24)
106#define RG_HDMITX_DRV_IMP_D2 (0x3f << 16)
107#define RG_HDMITX_DRV_IMP_D1 (0x3f << 8)
108#define RG_HDMITX_DRV_IMP_D0 (0x3f << 0)
109#define DRV_IMP_CLK_SHIFT 24
110#define DRV_IMP_D2_SHIFT 16
111#define DRV_IMP_D1_SHIFT 8
112#define DRV_IMP_D0_SHIFT 0
113#define HDMI_CON7 0x1c
114#define RG_HDMITX_MHLCK_DRV_IBIAS (0x1f << 27)
115#define RG_HDMITX_SER_DIN (0x3ff << 16)
116#define RG_HDMITX_CHLDC_TST (0xf << 12)
117#define RG_HDMITX_CHLCK_TST (0xf << 8)
118#define RG_HDMITX_RESERVE (0xff << 0)
119#define HDMI_CON8 0x20
120#define RGS_HDMITX_2T1_LEV (0xf << 16)
121#define RGS_HDMITX_2T1_EDG (0xf << 12)
122#define RGS_HDMITX_5T1_LEV (0xf << 8)
123#define RGS_HDMITX_5T1_EDG (0xf << 4)
124#define RGS_HDMITX_PLUG_TST BIT(0)
125
126struct mtk_hdmi_phy {
127 void __iomem *regs;
128 struct device *dev;
129 struct clk *pll;
130 struct clk_hw pll_hw;
131 unsigned long pll_rate;
132 u8 drv_imp_clk;
133 u8 drv_imp_d2;
134 u8 drv_imp_d1;
135 u8 drv_imp_d0;
136 u32 ibias;
137 u32 ibias_up;
138};
139
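/*
 * PLL parameter tables. The rows appear to correspond to the 27 MHz,
 * 74.25 MHz and 148.5 MHz pixel clock bands and the columns to deep
 * color modes; mtk_hdmi_pll_set_rate() below only ever uses the values
 * from column 0, so this reading is inferred rather than stated in the
 * patch itself.
 */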
140static const u8 PREDIV[3][4] = {
141 {0x0, 0x0, 0x0, 0x0}, /* 27 MHz */
142 {0x1, 0x1, 0x1, 0x1}, /* 74 MHz */
143 {0x1, 0x1, 0x1, 0x1} /* 148 MHz */
144};
145
146static const u8 TXDIV[3][4] = {
147 {0x3, 0x3, 0x3, 0x2}, /* 27 MHz */
148 {0x2, 0x1, 0x1, 0x1}, /* 74 MHz */
149 {0x1, 0x0, 0x0, 0x0} /* 148 MHz */
150};
151
152static const u8 FBKSEL[3][4] = {
153 {0x1, 0x1, 0x1, 0x1}, /* 27 MHz */
154 {0x1, 0x0, 0x1, 0x1}, /* 74 MHz */
155 {0x1, 0x0, 0x1, 0x1} /* 148 MHz */
156};
157
158static const u8 FBKDIV[3][4] = {
159 {19, 24, 29, 19}, /* 27 MHz */
160 {19, 24, 14, 19}, /* 74 MHz */
161 {19, 24, 14, 19} /* 148 MHz */
162};
163
164static const u8 DIVEN[3][4] = {
165 {0x2, 0x1, 0x1, 0x2}, /* 27 MHz */
166 {0x2, 0x2, 0x2, 0x2}, /* 74 MHz */
167 {0x2, 0x2, 0x2, 0x2} /* 148 MHz */
168};
169
170static const u8 HTPLLBP[3][4] = {
171 {0xc, 0xc, 0x8, 0xc}, /* 27 MHz */
172 {0xc, 0xf, 0xf, 0xc}, /* 74 MHz */
173 {0xc, 0xf, 0xf, 0xc} /* 148 MHz */
174};
175
176static const u8 HTPLLBC[3][4] = {
177 {0x2, 0x3, 0x3, 0x2}, /* 27 MHz */
178 {0x2, 0x3, 0x3, 0x2}, /* 74 MHz */
179 {0x2, 0x3, 0x3, 0x2} /* 148 MHz */
180};
181
182static const u8 HTPLLBR[3][4] = {
183 {0x1, 0x1, 0x0, 0x1}, /* 27 MHz */
184 {0x1, 0x2, 0x2, 0x1}, /* 74 MHz */
185 {0x1, 0x2, 0x2, 0x1} /* 148 MHz */
186};
187
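/*
 * Non-atomic read-modify-write accessors for the PHY registers. No
 * locking is done here; callers are presumably serialized by the clk
 * and phy frameworks.
 */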
188static void mtk_hdmi_phy_clear_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
189 u32 bits)
190{
191 void __iomem *reg = hdmi_phy->regs + offset;
192 u32 tmp;
193
194 tmp = readl(reg);
195 tmp &= ~bits;
196 writel(tmp, reg);
197}
198
199static void mtk_hdmi_phy_set_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
200 u32 bits)
201{
202 void __iomem *reg = hdmi_phy->regs + offset;
203 u32 tmp;
204
205 tmp = readl(reg);
206 tmp |= bits;
207 writel(tmp, reg);
208}
209
210static void mtk_hdmi_phy_mask(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
211 u32 val, u32 mask)
212{
213 void __iomem *reg = hdmi_phy->regs + offset;
214 u32 tmp;
215
216 tmp = readl(reg);
217 tmp = (tmp & ~mask) | (val & mask);
218 writel(tmp, reg);
219}
220
221static inline struct mtk_hdmi_phy *to_mtk_hdmi_phy(struct clk_hw *hw)
222{
223 return container_of(hw, struct mtk_hdmi_phy, pll_hw);
224}
225
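/*
 * PLL power-up: enable band auto-calibration and the post-divider, take
 * the TMDS clock off the MHL path, then bring up the bias, the PLL core,
 * the bias low-pass filter and finally the output divider, allowing
 * ~100us of settling between the steps.
 */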
226static int mtk_hdmi_pll_prepare(struct clk_hw *hw)
227{
228 struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
229
230 dev_dbg(hdmi_phy->dev, "%s\n", __func__);
231
232 mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_AUTOK_EN);
233 mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV);
234 mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, RG_HDMITX_MHLCK_EN);
235 mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_EN);
236 usleep_range(100, 150);
237 mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_EN);
238 usleep_range(100, 150);
239 mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_LPF_EN);
240 mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_TXDIV_EN);
241
242 return 0;
243}
244
245static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
246{
247 struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
248
249 dev_dbg(hdmi_phy->dev, "%s\n", __func__);
250
251 mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_TXDIV_EN);
252 mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_LPF_EN);
253 usleep_range(100, 150);
254 mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_EN);
255 usleep_range(100, 150);
256 mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_EN);
257 mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV);
258 mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_AUTOK_EN);
259 usleep_range(100, 150);
260}
261
262static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
263 unsigned long parent_rate)
264{
265 struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
266 unsigned int pre_div;
267 unsigned int div;
268
269 dev_dbg(hdmi_phy->dev, "%s: %lu Hz, parent: %lu Hz\n", __func__,
270 rate, parent_rate);
271
272 if (rate <= 27000000) {
273 pre_div = 0;
274 div = 3;
275 } else if (rate <= 74250000) {
276 pre_div = 1;
277 div = 2;
278 } else {
279 pre_div = 1;
280 div = 1;
281 }
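	/*
	 * pre_div and div match column 0 of the PREDIV and TXDIV tables
	 * above; larger TXDIV values are used at lower rates, presumably
	 * to keep the PLL's VCO within its operating range.
	 */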
282
283 mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0,
284 (pre_div << PREDIV_SHIFT), RG_HDMITX_PLL_PREDIV);
285 mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV);
286 mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0,
287 (0x1 << PLL_IC_SHIFT) | (0x1 << PLL_IR_SHIFT),
288 RG_HDMITX_PLL_IC | RG_HDMITX_PLL_IR);
289 mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON1,
290 (div << PLL_TXDIV_SHIFT), RG_HDMITX_PLL_TXDIV);
291 mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0,
292 (0x1 << PLL_FBKSEL_SHIFT) | (19 << PLL_FBKDIV_SHIFT),
293 RG_HDMITX_PLL_FBKSEL | RG_HDMITX_PLL_FBKDIV);
294 mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON1,
295 (0x2 << PLL_DIVEN_SHIFT), RG_HDMITX_PLL_DIVEN);
296 mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0,
297 (0xc << PLL_BP_SHIFT) | (0x2 << PLL_BC_SHIFT) |
298 (0x1 << PLL_BR_SHIFT),
299 RG_HDMITX_PLL_BP | RG_HDMITX_PLL_BC |
300 RG_HDMITX_PLL_BR);
301 mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, RG_HDMITX_PRD_IMP_EN);
302 mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON4,
303 (0x3 << PRD_IBIAS_CLK_SHIFT) |
304 (0x3 << PRD_IBIAS_D2_SHIFT) |
305 (0x3 << PRD_IBIAS_D1_SHIFT) |
306 (0x3 << PRD_IBIAS_D0_SHIFT),
307 RG_HDMITX_PRD_IBIAS_CLK |
308 RG_HDMITX_PRD_IBIAS_D2 |
309 RG_HDMITX_PRD_IBIAS_D1 |
310 RG_HDMITX_PRD_IBIAS_D0);
311 mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON3,
312 (0x0 << DRV_IMP_EN_SHIFT), RG_HDMITX_DRV_IMP_EN);
313 mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6,
314 (hdmi_phy->drv_imp_clk << DRV_IMP_CLK_SHIFT) |
315 (hdmi_phy->drv_imp_d2 << DRV_IMP_D2_SHIFT) |
316 (hdmi_phy->drv_imp_d1 << DRV_IMP_D1_SHIFT) |
317 (hdmi_phy->drv_imp_d0 << DRV_IMP_D0_SHIFT),
318 RG_HDMITX_DRV_IMP_CLK | RG_HDMITX_DRV_IMP_D2 |
319 RG_HDMITX_DRV_IMP_D1 | RG_HDMITX_DRV_IMP_D0);
320 mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON5,
321 (hdmi_phy->ibias << DRV_IBIAS_CLK_SHIFT) |
322 (hdmi_phy->ibias << DRV_IBIAS_D2_SHIFT) |
323 (hdmi_phy->ibias << DRV_IBIAS_D1_SHIFT) |
324 (hdmi_phy->ibias << DRV_IBIAS_D0_SHIFT),
325 RG_HDMITX_DRV_IBIAS_CLK | RG_HDMITX_DRV_IBIAS_D2 |
326 RG_HDMITX_DRV_IBIAS_D1 | RG_HDMITX_DRV_IBIAS_D0);
327 return 0;
328}
329
330static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
331 unsigned long *parent_rate)
332{
333 struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
334
335 hdmi_phy->pll_rate = rate;
336 if (rate <= 74250000)
337 *parent_rate = rate;
338 else
339 *parent_rate = rate / 2;
340
341 return rate;
342}
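/*
 * round_rate() above caches the requested rate and recalc_rate() below
 * simply reports that cache; the rate is never read back from hardware.
 */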
343
344static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
345 unsigned long parent_rate)
346{
347 struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
348
349 return hdmi_phy->pll_rate;
350}
351
352static const struct clk_ops mtk_hdmi_pll_ops = {
353 .prepare = mtk_hdmi_pll_prepare,
354 .unprepare = mtk_hdmi_pll_unprepare,
355 .set_rate = mtk_hdmi_pll_set_rate,
356 .round_rate = mtk_hdmi_pll_round_rate,
357 .recalc_rate = mtk_hdmi_pll_recalc_rate,
358};
359
360static void mtk_hdmi_phy_enable_tmds(struct mtk_hdmi_phy *hdmi_phy)
361{
362 mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON3,
363 RG_HDMITX_SER_EN | RG_HDMITX_PRD_EN |
364 RG_HDMITX_DRV_EN);
365 usleep_range(100, 150);
366}
367
368static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
369{
370 mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3,
371 RG_HDMITX_DRV_EN | RG_HDMITX_PRD_EN |
372 RG_HDMITX_SER_EN);
373}
374
375static int mtk_hdmi_phy_power_on(struct phy *phy)
376{
377 struct mtk_hdmi_phy *hdmi_phy = phy_get_drvdata(phy);
378 int ret;
379
380 ret = clk_prepare_enable(hdmi_phy->pll);
381 if (ret < 0)
382 return ret;
383
384 mtk_hdmi_phy_enable_tmds(hdmi_phy);
385
386 return 0;
387}
388
389static int mtk_hdmi_phy_power_off(struct phy *phy)
390{
391 struct mtk_hdmi_phy *hdmi_phy = phy_get_drvdata(phy);
392
393 mtk_hdmi_phy_disable_tmds(hdmi_phy);
394 clk_disable_unprepare(hdmi_phy->pll);
395
396 return 0;
397}
398
399static const struct phy_ops mtk_hdmi_phy_ops = {
400 .power_on = mtk_hdmi_phy_power_on,
401 .power_off = mtk_hdmi_phy_power_off,
402 .owner = THIS_MODULE,
403};
404
405static int mtk_hdmi_phy_probe(struct platform_device *pdev)
406{
407 struct device *dev = &pdev->dev;
408 struct mtk_hdmi_phy *hdmi_phy;
409 struct resource *mem;
410 struct clk *ref_clk;
411 const char *ref_clk_name;
412 struct clk_init_data clk_init = {
413 .ops = &mtk_hdmi_pll_ops,
414 .num_parents = 1,
415 .parent_names = (const char * const *)&ref_clk_name,
416 .flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
417 };
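	/*
	 * CLK_SET_RATE_PARENT propagates rate requests on the TMDS PLL up
	 * to the "pll_ref" input fetched below; CLK_SET_RATE_GATE requires
	 * the PLL to be unprepared before its rate may change.
	 */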
418 struct phy *phy;
419 struct phy_provider *phy_provider;
420 int ret;
421
422 hdmi_phy = devm_kzalloc(dev, sizeof(*hdmi_phy), GFP_KERNEL);
423 if (!hdmi_phy)
424 return -ENOMEM;
425
426 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
427 hdmi_phy->regs = devm_ioremap_resource(dev, mem);
428 if (IS_ERR(hdmi_phy->regs)) {
429 ret = PTR_ERR(hdmi_phy->regs);
430 dev_err(dev, "Failed to get memory resource: %d\n", ret);
431 return ret;
432 }
433
434 ref_clk = devm_clk_get(dev, "pll_ref");
435 if (IS_ERR(ref_clk)) {
436 ret = PTR_ERR(ref_clk);
437 dev_err(&pdev->dev, "Failed to get PLL reference clock: %d\n",
438 ret);
439 return ret;
440 }
441 ref_clk_name = __clk_get_name(ref_clk);
442
443 ret = of_property_read_string(dev->of_node, "clock-output-names",
444 &clk_init.name);
445 if (ret < 0) {
446 dev_err(dev, "Failed to read clock-output-names: %d\n", ret);
447 return ret;
448 }
449
450 hdmi_phy->pll_hw.init = &clk_init;
451 hdmi_phy->pll = devm_clk_register(dev, &hdmi_phy->pll_hw);
452 if (IS_ERR(hdmi_phy->pll)) {
453 ret = PTR_ERR(hdmi_phy->pll);
454 dev_err(dev, "Failed to register PLL: %d\n", ret);
455 return ret;
456 }
457
458 ret = of_property_read_u32(dev->of_node, "mediatek,ibias",
459 &hdmi_phy->ibias);
460 if (ret < 0) {
461 dev_err(&pdev->dev, "Failed to get ibias: %d\n", ret);
462 return ret;
463 }
464
465 ret = of_property_read_u32(dev->of_node, "mediatek,ibias_up",
466 &hdmi_phy->ibias_up);
467 if (ret < 0) {
468 dev_err(&pdev->dev, "Failed to get ibias up: %d\n", ret);
469 return ret;
470 }
471
472 dev_info(dev, "Using default TX DRV impedance: 4.2k/36\n");
473 hdmi_phy->drv_imp_clk = 0x30;
474 hdmi_phy->drv_imp_d2 = 0x30;
475 hdmi_phy->drv_imp_d1 = 0x30;
476 hdmi_phy->drv_imp_d0 = 0x30;
477
478 phy = devm_phy_create(dev, NULL, &mtk_hdmi_phy_ops);
479 if (IS_ERR(phy)) {
480 dev_err(dev, "Failed to create HDMI PHY\n");
481 return PTR_ERR(phy);
482 }
483 phy_set_drvdata(phy, hdmi_phy);
484
485 phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
486 if (IS_ERR(phy_provider))
487 return PTR_ERR(phy_provider);
488
489 hdmi_phy->dev = dev;
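	/* Also expose the PLL as a DT clock provider so that other nodes
	 * can reference it directly.
	 */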
490 return of_clk_add_provider(dev->of_node, of_clk_src_simple_get,
491 hdmi_phy->pll);
492}
493
494static int mtk_hdmi_phy_remove(struct platform_device *pdev)
495{
496 return 0;
497}
498
499static const struct of_device_id mtk_hdmi_phy_match[] = {
500 { .compatible = "mediatek,mt8173-hdmi-phy", },
501 {},
502};
503
504struct platform_driver mtk_hdmi_phy_driver = {
505 .probe = mtk_hdmi_phy_probe,
506 .remove = mtk_hdmi_phy_remove,
507 .driver = {
508 .name = "mediatek-hdmi-phy",
509 .of_match_table = mtk_hdmi_phy_match,
510 },
511};
512
513MODULE_AUTHOR("Jie Qiu <jie.qiu@mediatek.com>");
514MODULE_DESCRIPTION("MediaTek MT8173 HDMI PHY Driver");
515MODULE_LICENSE("GPL v2");
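
For reference, a consumer of this PHY (such as the HDMI bridge driver added
elsewhere in this series) would drive it through the generic PHY API roughly
as sketched below. This is an illustrative sketch only; the "hdmi-phy"
lookup name is a hypothetical phy-names entry, not taken from the patch.

#include <linux/phy/phy.h>

static int example_attach_hdmi_phy(struct device *dev)
{
	struct phy *phy;

	/* "hdmi-phy" is a hypothetical con_id used for illustration */
	phy = devm_phy_get(dev, "hdmi-phy");
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	/* ends up in mtk_hdmi_phy_power_on(): PLL prepare, then TMDS on */
	return phy_power_on(phy);
}
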
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index ebb470ff7200..2b4b125eebc3 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -101,7 +101,7 @@ static struct drm_driver driver = {
 	.minor = DRIVER_MINOR,
 	.patchlevel = DRIVER_PATCHLEVEL,
 
-	.gem_free_object = mgag200_gem_free_object,
+	.gem_free_object_unlocked = mgag200_gem_free_object,
 	.dumb_create = mgag200_dumb_create,
 	.dumb_map_offset = mgag200_dumb_mmap_offset,
 	.dumb_destroy = drm_gem_dumb_destroy,
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 14e64e08909e..6b21cb27e1cc 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -182,7 +182,7 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
 		}
 	}
 
-	fvv = pllreffreq * testn / testm;
+	fvv = pllreffreq * (n + 1) / (m + 1);
 	fvv = (fvv - 800000) / 50000;
 
 	if (fvv > 15)
@@ -202,6 +202,14 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
 	WREG_DAC(MGA1064_PIX_PLLC_M, m);
 	WREG_DAC(MGA1064_PIX_PLLC_N, n);
 	WREG_DAC(MGA1064_PIX_PLLC_P, p);
+
+	if (mdev->unique_rev_id >= 0x04) {
+		WREG_DAC(0x1a, 0x09);
+		msleep(20);
+		WREG_DAC(0x1a, 0x01);
+
+	}
+
 	return 0;
 }
 
@@ -1344,19 +1352,20 @@ static void mga_crtc_commit(struct drm_crtc *crtc)
  * use this for 8-bit mode so can't perform smooth fades on deeper modes,
  * but it's a requirement that we provide the function
  */
-static void mga_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
-			       u16 *blue, uint32_t start, uint32_t size)
+static int mga_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+			      u16 *blue, uint32_t size)
 {
 	struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
-	int end = (start + size > MGAG200_LUT_SIZE) ? MGAG200_LUT_SIZE : start + size;
 	int i;
 
-	for (i = start; i < end; i++) {
+	for (i = 0; i < size; i++) {
 		mga_crtc->lut_r[i] = red[i] >> 8;
 		mga_crtc->lut_g[i] = green[i] >> 8;
 		mga_crtc->lut_b[i] = blue[i] >> 8;
 	}
 	mga_crtc_load_lut(crtc);
+
+	return 0;
 }
 
 /* Simple cleanup function */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index fbe304ee6c80..2aec27dbb5bb 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -408,7 +408,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	}
 
 	adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo);
-	if (!adreno_gpu->memptrs) {
+	if (IS_ERR(adreno_gpu->memptrs)) {
 		dev_err(drm->dev, "could not vmap memptrs\n");
 		return -ENOMEM;
 	}
diff --git a/drivers/gpu/drm/msm/edp/edp_connector.c b/drivers/gpu/drm/msm/edp/edp_connector.c
index 72360cd038c0..5960628ceb93 100644
--- a/drivers/gpu/drm/msm/edp/edp_connector.c
+++ b/drivers/gpu/drm/msm/edp/edp_connector.c
@@ -91,15 +91,6 @@ static int edp_connector_mode_valid(struct drm_connector *connector,
 	return MODE_OK;
 }
 
-static struct drm_encoder *
-edp_connector_best_encoder(struct drm_connector *connector)
-{
-	struct edp_connector *edp_connector = to_edp_connector(connector);
-
-	DBG("");
-	return edp_connector->edp->encoder;
-}
-
 static const struct drm_connector_funcs edp_connector_funcs = {
 	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = edp_connector_detect,
@@ -113,7 +104,6 @@ static const struct drm_connector_funcs edp_connector_funcs = {
 static const struct drm_connector_helper_funcs edp_connector_helper_funcs = {
 	.get_modes = edp_connector_get_modes,
 	.mode_valid = edp_connector_mode_valid,
-	.best_encoder = edp_connector_best_encoder,
 };
 
 /* initialize connector */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index b15d72683112..a2515b466ce5 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -406,13 +406,6 @@ static int msm_hdmi_connector_mode_valid(struct drm_connector *connector,
 	return 0;
 }
 
-static struct drm_encoder *
-msm_hdmi_connector_best_encoder(struct drm_connector *connector)
-{
-	struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
-	return hdmi_connector->hdmi->encoder;
-}
-
 static const struct drm_connector_funcs hdmi_connector_funcs = {
 	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = hdmi_connector_detect,
@@ -426,7 +419,6 @@ static const struct drm_connector_funcs hdmi_connector_funcs = {
 static const struct drm_connector_helper_funcs msm_hdmi_connector_helper_funcs = {
 	.get_modes = msm_hdmi_connector_get_modes,
 	.mode_valid = msm_hdmi_connector_mode_valid,
-	.best_encoder = msm_hdmi_connector_best_encoder,
 };
 
 /* initialize connector */
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 67442d50a6c2..f145d256e332 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -106,31 +106,27 @@ out:
 static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
 {
 	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
-	int i, ncrtcs = state->dev->mode_config.num_crtc;
+	int i;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
 
 	mdp4_enable(mdp4_kms);
 
 	/* see 119ecb7fd */
-	for (i = 0; i < ncrtcs; i++) {
-		struct drm_crtc *crtc = state->crtcs[i];
-		if (!crtc)
-			continue;
+	for_each_crtc_in_state(state, crtc, crtc_state, i)
 		drm_crtc_vblank_get(crtc);
-	}
 }
 
 static void mdp4_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
 {
 	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
-	int i, ncrtcs = state->dev->mode_config.num_crtc;
+	int i;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
 
 	/* see 119ecb7fd */
-	for (i = 0; i < ncrtcs; i++) {
-		struct drm_crtc *crtc = state->crtcs[i];
-		if (!crtc)
-			continue;
+	for_each_crtc_in_state(state, crtc, crtc_state, i)
 		drm_crtc_vblank_put(crtc);
-	}
 
 	mdp4_disable(mdp4_kms);
 }
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
index 2648cd7631ef..353429b05733 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
@@ -90,14 +90,6 @@ static int mdp4_lvds_connector_mode_valid(struct drm_connector *connector,
 	return MODE_OK;
 }
 
-static struct drm_encoder *
-mdp4_lvds_connector_best_encoder(struct drm_connector *connector)
-{
-	struct mdp4_lvds_connector *mdp4_lvds_connector =
-			to_mdp4_lvds_connector(connector);
-	return mdp4_lvds_connector->encoder;
-}
-
 static const struct drm_connector_funcs mdp4_lvds_connector_funcs = {
 	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = mdp4_lvds_connector_detect,
@@ -111,7 +103,6 @@ static const struct drm_connector_funcs mdp4_lvds_connector_funcs = {
 static const struct drm_connector_helper_funcs mdp4_lvds_connector_helper_funcs = {
 	.get_modes = mdp4_lvds_connector_get_modes,
 	.mode_valid = mdp4_lvds_connector_mode_valid,
-	.best_encoder = mdp4_lvds_connector_best_encoder,
 };
 
 /* initialize connector */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 88fe256c1931..4e8ed739f558 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -374,6 +374,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 	struct drm_device *dev = crtc->dev;
 	struct plane_state pstates[STAGE_MAX + 1];
 	const struct mdp5_cfg_hw *hw_cfg;
+	const struct drm_plane_state *pstate;
 	int cnt = 0, i;
 
 	DBG("%s: check", mdp5_crtc->name);
@@ -382,20 +383,13 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 	 * and that we don't have conflicting mixer stages:
 	 */
 	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
-	drm_atomic_crtc_state_for_each_plane(plane, state) {
-		struct drm_plane_state *pstate;
+	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
 		if (cnt >= (hw_cfg->lm.nb_stages)) {
 			dev_err(dev->dev, "too many planes!\n");
 			return -EINVAL;
 		}
 
-		pstate = state->state->plane_states[drm_plane_index(plane)];
 
-		/* plane might not have changed, in which case take
-		 * current state:
-		 */
-		if (!pstate)
-			pstate = plane->state;
 		pstates[cnt].plane = plane;
 		pstates[cnt].state = to_mdp5_plane_state(pstate);
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 484b4d15e71d..f0c285b1c027 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -78,17 +78,11 @@ static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *s
 {
 	int i;
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
-	int nplanes = mdp5_kms->dev->mode_config.num_total_plane;
-
-	for (i = 0; i < nplanes; i++) {
-		struct drm_plane *plane = state->planes[i];
-		struct drm_plane_state *plane_state = state->plane_states[i];
-
-		if (!plane)
-			continue;
+	struct drm_plane *plane;
+	struct drm_plane_state *plane_state;
 
+	for_each_plane_in_state(state, plane, plane_state, i)
 		mdp5_plane_complete_commit(plane, plane_state);
-	}
 
 	mdp5_disable(mdp5_kms);
 }
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index e3892c263f27..4a8a6f1f1151 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -84,17 +84,12 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
 		struct drm_atomic_state *old_state)
 {
 	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
 	struct msm_drm_private *priv = old_state->dev->dev_private;
 	struct msm_kms *kms = priv->kms;
-	int ncrtcs = old_state->dev->mode_config.num_crtc;
 	int i;
 
-	for (i = 0; i < ncrtcs; i++) {
-		crtc = old_state->crtcs[i];
-
-		if (!crtc)
-			continue;
-
+	for_each_crtc_in_state(old_state, crtc, crtc_state, i) {
 		if (!crtc->state->enable)
 			continue;
 
@@ -192,9 +187,11 @@ int msm_atomic_commit(struct drm_device *dev,
 		struct drm_atomic_state *state, bool nonblock)
 {
 	struct msm_drm_private *priv = dev->dev_private;
-	int nplanes = dev->mode_config.num_total_plane;
-	int ncrtcs = dev->mode_config.num_crtc;
 	struct msm_commit *c;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	struct drm_plane *plane;
+	struct drm_plane_state *plane_state;
 	int i, ret;
 
 	ret = drm_atomic_helper_prepare_planes(dev, state);
@@ -210,28 +207,18 @@ int msm_atomic_commit(struct drm_device *dev,
 	/*
 	 * Figure out what crtcs we have:
 	 */
-	for (i = 0; i < ncrtcs; i++) {
-		struct drm_crtc *crtc = state->crtcs[i];
-		if (!crtc)
-			continue;
-		c->crtc_mask |= (1 << drm_crtc_index(crtc));
-	}
+	for_each_crtc_in_state(state, crtc, crtc_state, i)
+		c->crtc_mask |= drm_crtc_mask(crtc);
 
 	/*
 	 * Figure out what fence to wait for:
 	 */
-	for (i = 0; i < nplanes; i++) {
-		struct drm_plane *plane = state->planes[i];
-		struct drm_plane_state *new_state = state->plane_states[i];
-
-		if (!plane)
-			continue;
-
-		if ((plane->state->fb != new_state->fb) && new_state->fb) {
-			struct drm_gem_object *obj = msm_framebuffer_bo(new_state->fb, 0);
+	for_each_plane_in_state(state, plane, plane_state, i) {
+		if ((plane->state->fb != plane_state->fb) && plane_state->fb) {
+			struct drm_gem_object *obj = msm_framebuffer_bo(plane_state->fb, 0);
 			struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
-			new_state->fence = reservation_object_get_excl_rcu(msm_obj->resv);
+			plane_state->fence = reservation_object_get_excl_rcu(msm_obj->resv);
 		}
 	}
 
@@ -251,7 +238,7 @@ int msm_atomic_commit(struct drm_device *dev,
 	 * the software side now.
 	 */
 
-	drm_atomic_helper_swap_state(dev, state);
+	drm_atomic_helper_swap_state(state, true);
 
 	/*
 	 * Everything below can be run asynchronously without the need to grab
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 9c654092ef78..a02dc2b27739 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -197,8 +197,6 @@ static int msm_drm_uninit(struct device *dev)
 
 	drm_kms_helper_poll_fini(ddev);
 
-	drm_connector_unregister_all(ddev);
-
 	drm_dev_unregister(ddev);
 
 #ifdef CONFIG_DRM_FBDEV_EMULATION
@@ -431,12 +429,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 	if (ret)
 		goto fail;
 
-	ret = drm_connector_register_all(ddev);
-	if (ret) {
-		dev_err(dev, "failed to register connectors\n");
-		goto fail;
-	}
-
 	drm_mode_config_reset(ddev);
 
 #ifdef CONFIG_DRM_FBDEV_EMULATION
@@ -730,7 +722,6 @@ static struct drm_driver msm_driver = {
 	.open = msm_open,
 	.preclose = msm_preclose,
 	.lastclose = msm_lastclose,
-	.set_busid = drm_platform_set_busid,
 	.irq_handler = msm_irq,
 	.irq_preinstall = msm_irq_preinstall,
 	.irq_postinstall = msm_irq_postinstall,
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 461dc8b873f0..7919c24c6ddd 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -56,17 +56,9 @@ static void msm_framebuffer_destroy(struct drm_framebuffer *fb)
 	kfree(msm_fb);
 }
 
-static int msm_framebuffer_dirty(struct drm_framebuffer *fb,
-		struct drm_file *file_priv, unsigned flags, unsigned color,
-		struct drm_clip_rect *clips, unsigned num_clips)
-{
-	return 0;
-}
-
 static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
 	.create_handle = msm_framebuffer_create_handle,
 	.destroy = msm_framebuffer_destroy,
-	.dirty = msm_framebuffer_dirty,
 };
 
 #ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index d9759bf3482e..a9223bea871b 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -159,6 +159,10 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 	dev->mode_config.fb_base = paddr;
 
 	fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo);
+	if (IS_ERR(fbi->screen_base)) {
+		ret = PTR_ERR(fbi->screen_base);
+		goto fail_unlock;
+	}
 	fbi->screen_size = fbdev->bo->size;
 	fbi->fix.smem_start = paddr;
 	fbi->fix.smem_len = fbdev->bo->size;
@@ -184,21 +188,7 @@ fail:
 	return ret;
 }
 
-static void msm_crtc_fb_gamma_set(struct drm_crtc *crtc,
-		u16 red, u16 green, u16 blue, int regno)
-{
-	DBG("fbdev: set gamma");
-}
-
-static void msm_crtc_fb_gamma_get(struct drm_crtc *crtc,
-		u16 *red, u16 *green, u16 *blue, int regno)
-{
-	DBG("fbdev: get gamma");
-}
-
 static const struct drm_fb_helper_funcs msm_fb_helper_funcs = {
-	.gamma_set = msm_crtc_fb_gamma_set,
-	.gamma_get = msm_crtc_fb_gamma_get,
 	.fb_probe = msm_fbdev_create,
 };
 
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 7daf4054dd2b..69836f5685b1 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -398,6 +398,8 @@ void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
 			return ERR_CAST(pages);
 		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
 				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+		if (msm_obj->vaddr == NULL)
+			return ERR_PTR(-ENOMEM);
 	}
 	return msm_obj->vaddr;
 }
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index b89ca5174863..eb4bb8b2f3a5 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -40,12 +40,14 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
 
 	submit->dev = dev;
 	submit->gpu = gpu;
+	submit->fence = NULL;
 	submit->pid = get_pid(task_pid(current));
 
 	/* initially, until copy_from_user() and bo lookup succeeds: */
 	submit->nr_bos = 0;
 	submit->nr_cmds = 0;
 
+	INIT_LIST_HEAD(&submit->node);
 	INIT_LIST_HEAD(&submit->bo_list);
 	ww_acquire_init(&submit->ticket, &reservation_ww_class);
 
@@ -75,6 +77,11 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
 		void __user *userptr =
 			u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));
 
+		/* make sure we don't have garbage flags, in case we hit
+		 * error path before flags is initialized:
+		 */
+		submit->bos[i].flags = 0;
+
 		ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
 		if (ret) {
 			ret = -EFAULT;
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index b48f73ac6389..0857710c2ff2 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -312,6 +312,9 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
 		struct msm_gem_object *obj = submit->bos[idx].obj;
 		const char *buf = msm_gem_vaddr_locked(&obj->base);
 
+		if (IS_ERR(buf))
+			continue;
+
 		buf += iova - submit->bos[idx].iova;
 
 		rd_write_section(rd, RD_GPUADDR,
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 1f14b908b221..42f5359cf988 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -40,6 +40,10 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
 	}
 
 	ring->start = msm_gem_vaddr_locked(ring->bo);
+	if (IS_ERR(ring->start)) {
+		ret = PTR_ERR(ring->start);
+		goto fail;
+	}
 	ring->end = ring->start + (size / 4);
 	ring->cur = ring->start;
 
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 6f318c54da33..0cb7a18cde26 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -785,14 +785,14 @@ nv_crtc_disable(struct drm_crtc *crtc)
 	nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]);
 }
 
-static void
-nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start,
+static int
+nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
 		  uint32_t size)
 {
-	int end = (start + size > 256) ? 256 : start + size, i;
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	int i;
 
-	for (i = start; i < end; i++) {
+	for (i = 0; i < size; i++) {
 		nv_crtc->lut.r[i] = r[i];
 		nv_crtc->lut.g[i] = g[i];
 		nv_crtc->lut.b[i] = b[i];
@@ -805,10 +805,12 @@ nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start,
 	 */
 	if (!nv_crtc->base.primary->fb) {
 		nv_crtc->lut.depth = 0;
-		return;
+		return 0;
 	}
 
 	nv_crtc_gamma_load(crtc);
+
+	return 0;
 }
 
 static int
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index c612dc1f1eb4..126a85cc81bc 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -16,9 +16,9 @@ enum nvkm_devidx {
 	NVKM_SUBDEV_MC,
 	NVKM_SUBDEV_BUS,
 	NVKM_SUBDEV_TIMER,
+	NVKM_SUBDEV_INSTMEM,
 	NVKM_SUBDEV_FB,
 	NVKM_SUBDEV_LTC,
-	NVKM_SUBDEV_INSTMEM,
 	NVKM_SUBDEV_MMU,
 	NVKM_SUBDEV_BAR,
 	NVKM_SUBDEV_PMU,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h
index db10c11f0595..c5a6ebd5a478 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h
@@ -25,7 +25,8 @@ u16 nvbios_outp_match(struct nvkm_bios *, u16 type, u16 mask,
 		      u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_outp *);
 
 struct nvbios_ocfg {
-	u16 match;
+	u8  proto;
+	u8  flags;
 	u16 clkcmp[2];
 };
 
@@ -33,7 +34,7 @@ u16 nvbios_ocfg_entry(struct nvkm_bios *, u16 outp, u8 idx,
 		      u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
 u16 nvbios_ocfg_parse(struct nvkm_bios *, u16 outp, u8 idx,
 		      u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *);
-u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u16 type,
+u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u8 proto, u8 flags,
 		      u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *);
 u16 nvbios_oclk_match(struct nvkm_bios *, u16 cmp, u32 khz);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 7c77f960c8b8..6072fe292db8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -760,12 +760,11 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 
 	/* Initialize a page flip struct */
 	*s = (struct nouveau_page_flip_state)
-		{ { }, event, nouveau_crtc(crtc)->index,
-		  fb->bits_per_pixel, fb->pitches[0], crtc->x, crtc->y,
+		{ { }, event, crtc, fb->bits_per_pixel, fb->pitches[0],
 		  new_bo->bo.offset };
 
 	/* Keep vblanks on during flip, for the target crtc of this flip */
-	drm_vblank_get(dev, nouveau_crtc(crtc)->index);
+	drm_crtc_vblank_get(crtc);
 
 	/* Emit a page flip */
 	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
@@ -810,7 +809,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 		return 0;
 
 fail_unreserve:
-	drm_vblank_put(dev, nouveau_crtc(crtc)->index);
+	drm_crtc_vblank_put(crtc);
 	ttm_bo_unreserve(&old_bo->bo);
 fail_unpin:
 	mutex_unlock(&cli->mutex);
@@ -842,17 +841,17 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
 	s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
 	if (s->event) {
 		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
-			drm_arm_vblank_event(dev, s->crtc, s->event);
+			drm_crtc_arm_vblank_event(s->crtc, s->event);
 		} else {
-			drm_send_vblank_event(dev, s->crtc, s->event);
+			drm_crtc_send_vblank_event(s->crtc, s->event);
 
 			/* Give up ownership of vblank for page-flipped crtc */
-			drm_vblank_put(dev, s->crtc);
+			drm_crtc_vblank_put(s->crtc);
 		}
 	}
 	else {
 		/* Give up ownership of vblank for page-flipped crtc */
-		drm_vblank_put(dev, s->crtc);
+		drm_crtc_vblank_put(s->crtc);
 	}
 
 	list_del(&s->head);
@@ -873,9 +872,10 @@ nouveau_flip_complete(struct nvif_notify *notify)
 
 	if (!nouveau_finish_page_flip(chan, &state)) {
 		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
-			nv_set_crtc_base(drm->dev, state.crtc, state.offset +
-					 state.y * state.pitch +
-					 state.x * state.bpp / 8);
+			nv_set_crtc_base(drm->dev, drm_crtc_index(state.crtc),
+					 state.offset + state.crtc->y *
+					 state.pitch + state.crtc->x *
+					 state.bpp / 8);
 		}
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 24273bacd885..0420ee861ea4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -28,7 +28,8 @@ int nouveau_framebuffer_init(struct drm_device *, struct nouveau_framebuffer *,
 struct nouveau_page_flip_state {
 	struct list_head head;
 	struct drm_pending_vblank_event *event;
-	int crtc, bpp, pitch, x, y;
+	struct drm_crtc *crtc;
+	int bpp, pitch;
 	u64 offset;
 };
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 11f8dd9c0edb..295e7621cc68 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -22,13 +22,11 @@
  * Authors: Ben Skeggs
  */
 
-#include <linux/apple-gmux.h>
 #include <linux/console.h>
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/pm_runtime.h>
-#include <linux/vgaarb.h>
 #include <linux/vga_switcheroo.h>
 
 #include "drmP.h"
@@ -315,13 +313,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
 	bool boot = false;
 	int ret;
 
-	/*
-	 * apple-gmux is needed on dual GPU MacBook Pro
-	 * to probe the panel if we're the inactive GPU.
-	 */
-	if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
-	    apple_gmux_present() && pdev != vga_default_device() &&
-	    !vga_switcheroo_handler_flags())
+	if (vga_switcheroo_client_probe_defer(pdev))
 		return -EPROBE_DEFER;
 
 	/* remove conflicting drivers (vesafb, efifb etc) */
@@ -970,7 +962,7 @@ driver_stub = {
 	.gem_prime_vmap = nouveau_gem_prime_vmap,
 	.gem_prime_vunmap = nouveau_gem_prime_vunmap,
 
-	.gem_free_object = nouveau_gem_object_del,
+	.gem_free_object_unlocked = nouveau_gem_object_del,
 	.gem_open_object = nouveau_gem_object_open,
 	.gem_close_object = nouveau_gem_object_close,
 
@@ -1078,7 +1070,6 @@ nouveau_drm_init(void)
 	driver_pci = driver_stub;
 	driver_pci.set_busid = drm_pci_set_busid;
 	driver_platform = driver_stub;
-	driver_platform.set_busid = drm_platform_set_busid;
 
 	nouveau_display_options();
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 57aaf98a26f9..d1f248fd3506 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -552,6 +552,8 @@ nouveau_fbcon_init(struct drm_device *dev)
 	if (ret)
 		goto fini;
 
+	if (fbcon->helper.fbdev)
+		fbcon->helper.fbdev->pixmap.buf_align = 4;
 	return 0;
 
 fini:
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 2e3a62d38fe9..64c4ce7115ad 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -57,7 +57,8 @@ struct nouveau_fence_priv {
 	int  (*context_new)(struct nouveau_channel *);
 	void (*context_del)(struct nouveau_channel *);
 
-	u32 contexts, context_base;
+	u32 contexts;
+	u64 context_base;
 	bool uevent;
 };
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c
index 675e9e077a95..08f9c6fa0f7f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_usif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_usif.c
@@ -212,7 +212,6 @@ usif_notify_get(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
 	ntfy->p->base.event = &ntfy->p->e.base;
 	ntfy->p->base.file_priv = f;
 	ntfy->p->base.pid = current->pid;
-	ntfy->p->base.destroy =(void(*)(struct drm_pending_event *))kfree;
 	ntfy->p->e.base.type = DRM_NOUVEAU_EVENT_NVIF;
 	ntfy->p->e.base.length = sizeof(ntfy->p->e.base) + ntfy->reply;
 
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 0f3e4bb411cc..7d9248b8c664 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -82,7 +82,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 	uint32_t fg;
 	uint32_t bg;
 	uint32_t dsize;
-	uint32_t width;
 	uint32_t *data = (uint32_t *)image->data;
 	int ret;
 
@@ -93,9 +92,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 	if (ret)
 		return ret;
 
-	width = ALIGN(image->width, 8);
-	dsize = ALIGN(width * image->height, 32) >> 5;
-
 	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
 	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
 		fg = ((uint32_t *) info->pseudo_palette)[image->fg_color];
@@ -111,10 +107,11 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 		 ((image->dx + image->width) & 0xffff));
 	OUT_RING(chan, bg);
 	OUT_RING(chan, fg);
-	OUT_RING(chan, (image->height << 16) | width);
+	OUT_RING(chan, (image->height << 16) | image->width);
 	OUT_RING(chan, (image->height << 16) | image->width);
 	OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
 
+	dsize = ALIGN(image->width * image->height, 32) >> 5;
 	while (dsize) {
 		int iter_len = dsize > 128 ? 128 : dsize;
 
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 3ffc2b0057bf..7a7788212df7 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1346,21 +1346,22 @@ nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 	return 0;
 }
 
-static void
+static int
 nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
-		    uint32_t start, uint32_t size)
+		    uint32_t size)
 {
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-	u32 end = min_t(u32, start + size, 256);
 	u32 i;
 
-	for (i = start; i < end; i++) {
+	for (i = 0; i < size; i++) {
 		nv_crtc->lut.r[i] = r[i];
 		nv_crtc->lut.g[i] = g[i];
 		nv_crtc->lut.b[i] = b[i];
 	}
 
 	nv50_crtc_lut_load(crtc);
+
+	return 0;
 }
 
 static void
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 33d9ee0fac40..1aeb698e9707 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -95,7 +95,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 	struct nouveau_fbdev *nfbdev = info->par;
 	struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
 	struct nouveau_channel *chan = drm->channel;
-	uint32_t width, dwords, *data = (uint32_t *)image->data;
+	uint32_t dwords, *data = (uint32_t *)image->data;
 	uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
 	uint32_t *palette = info->pseudo_palette;
 	int ret;
@@ -107,9 +107,6 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 	if (ret)
 		return ret;
 
-	width = ALIGN(image->width, 32);
-	dwords = (width * image->height) >> 5;
-
 	BEGIN_NV04(chan, NvSub2D, 0x0814, 2);
 	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
 	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
@@ -128,6 +125,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 	OUT_RING(chan, 0);
 	OUT_RING(chan, image->dy);
 
+	dwords = ALIGN(image->width * image->height, 32) >> 5;
 	while (dwords) {
 		int push = dwords > 2047 ? 2047 : dwords;
 
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index a0913359ac05..839f4c8c1805 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -95,7 +95,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 	struct nouveau_fbdev *nfbdev = info->par;
 	struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
 	struct nouveau_channel *chan = drm->channel;
-	uint32_t width, dwords, *data = (uint32_t *)image->data;
+	uint32_t dwords, *data = (uint32_t *)image->data;
 	uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
 	uint32_t *palette = info->pseudo_palette;
 	int ret;
@@ -107,9 +107,6 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 	if (ret)
 		return ret;
 
-	width = ALIGN(image->width, 32);
-	dwords = (width * image->height) >> 5;
-
 	BEGIN_NVC0(chan, NvSub2D, 0x0814, 2);
 	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
 	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
@@ -128,6 +125,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 	OUT_RING (chan, 0);
 	OUT_RING (chan, image->dy);
 
+	dwords = ALIGN(image->width * image->height, 32) >> 5;
 	while (dwords) {
 		int push = dwords > 2047 ? 2047 : dwords;
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
index 18fab3973ce5..62ad0300cfa5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -1614,7 +1614,7 @@ nvkm_device_pci_func = {
 	.fini = nvkm_device_pci_fini,
 	.resource_addr = nvkm_device_pci_resource_addr,
 	.resource_size = nvkm_device_pci_resource_size,
-	.cpu_coherent = !IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_ARM64),
+	.cpu_coherent = !IS_ENABLED(CONFIG_ARM),
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index a74c5dd27dc0..e2a64ed14b22 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -18,6 +18,7 @@ nvkm-y += nvkm/engine/disp/piornv50.o
 nvkm-y += nvkm/engine/disp/sornv50.o
 nvkm-y += nvkm/engine/disp/sorg94.o
 nvkm-y += nvkm/engine/disp/sorgf119.o
+nvkm-y += nvkm/engine/disp/sorgm107.o
 nvkm-y += nvkm/engine/disp/sorgm200.o
 nvkm-y += nvkm/engine/disp/dport.o
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
index f0314664349c..5dd34382f55a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
@@ -76,6 +76,7 @@ exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl,
76 mask |= 0x0001 << or; 76 mask |= 0x0001 << or;
77 mask |= 0x0100 << head; 77 mask |= 0x0100 << head;
78 78
79
79 list_for_each_entry(outp, &disp->base.outp, head) { 80 list_for_each_entry(outp, &disp->base.outp, head) {
80 if ((outp->info.hasht & 0xff) == type && 81 if ((outp->info.hasht & 0xff) == type &&
81 (outp->info.hashm & mask) == mask) { 82 (outp->info.hashm & mask) == mask) {
@@ -155,25 +156,21 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
155 if (!outp) 156 if (!outp)
156 return NULL; 157 return NULL;
157 158
159 *conf = (ctrl & 0x00000f00) >> 8;
158 switch (outp->info.type) { 160 switch (outp->info.type) {
159 case DCB_OUTPUT_TMDS: 161 case DCB_OUTPUT_TMDS:
160 *conf = (ctrl & 0x00000f00) >> 8;
161 if (*conf == 5) 162 if (*conf == 5)
162 *conf |= 0x0100; 163 *conf |= 0x0100;
163 break; 164 break;
164 case DCB_OUTPUT_LVDS: 165 case DCB_OUTPUT_LVDS:
165 *conf = disp->sor.lvdsconf; 166 *conf |= disp->sor.lvdsconf;
166 break;
167 case DCB_OUTPUT_DP:
168 *conf = (ctrl & 0x00000f00) >> 8;
169 break; 167 break;
170 case DCB_OUTPUT_ANALOG:
171 default: 168 default:
172 *conf = 0x00ff;
173 break; 169 break;
174 } 170 }
175 171
176 data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2); 172 data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8,
173 &ver, &hdr, &cnt, &len, &info2);
177 if (data && id < 0xff) { 174 if (data && id < 0xff) {
178 data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk); 175 data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
179 if (data) { 176 if (data) {
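Note how *conf is now initialised once from the control word before the switch, with the per-type cases only adjusting it. The value doubles as the lookup key for the BIOS output-config table; as a sketch of the packing convention the new nvbios_ocfg_match() call relies on (taken from the call site above):

        u8 proto = *conf & 0xff;        /* output protocol, low byte      */
        u8 flags = *conf >> 8;          /* e.g. 0x01 after TMDS conf 5    */

The same transformation is applied to the nv50 variant of exec_clkcmp() further down.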
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
index b6944142d616..f4b9cf8574be 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
@@ -36,7 +36,7 @@ gm107_disp = {
36 .outp.internal.crt = nv50_dac_output_new, 36 .outp.internal.crt = nv50_dac_output_new,
37 .outp.internal.tmds = nv50_sor_output_new, 37 .outp.internal.tmds = nv50_sor_output_new,
38 .outp.internal.lvds = nv50_sor_output_new, 38 .outp.internal.lvds = nv50_sor_output_new,
39 .outp.internal.dp = gf119_sor_dp_new, 39 .outp.internal.dp = gm107_sor_dp_new,
40 .dac.nr = 3, 40 .dac.nr = 3,
41 .dac.power = nv50_dac_power, 41 .dac.power = nv50_dac_power,
42 .dac.sense = nv50_dac_sense, 42 .dac.sense = nv50_dac_sense,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index 4226d2153b9c..fcb1b0c46d64 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -387,22 +387,17 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
387 if (!outp) 387 if (!outp)
388 return NULL; 388 return NULL;
389 389
390 *conf = (ctrl & 0x00000f00) >> 8;
390 if (outp->info.location == 0) { 391 if (outp->info.location == 0) {
391 switch (outp->info.type) { 392 switch (outp->info.type) {
392 case DCB_OUTPUT_TMDS: 393 case DCB_OUTPUT_TMDS:
393 *conf = (ctrl & 0x00000f00) >> 8;
394 if (*conf == 5) 394 if (*conf == 5)
395 *conf |= 0x0100; 395 *conf |= 0x0100;
396 break; 396 break;
397 case DCB_OUTPUT_LVDS: 397 case DCB_OUTPUT_LVDS:
398 *conf = disp->sor.lvdsconf; 398 *conf |= disp->sor.lvdsconf;
399 break; 399 break;
400 case DCB_OUTPUT_DP:
401 *conf = (ctrl & 0x00000f00) >> 8;
402 break;
403 case DCB_OUTPUT_ANALOG:
404 default: 400 default:
405 *conf = 0x00ff;
406 break; 401 break;
407 } 402 }
408 } else { 403 } else {
@@ -410,7 +405,8 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
410 pclk = pclk / 2; 405 pclk = pclk / 2;
411 } 406 }
412 407
413 data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2); 408 data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8,
409 &ver, &hdr, &cnt, &len, &info2);
414 if (data && id < 0xff) { 410 if (data && id < 0xff) {
415 data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk); 411 data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
416 if (data) { 412 if (data) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
index e9067ba4e179..4e983f6d7032 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
@@ -62,7 +62,12 @@ int g94_sor_dp_lnk_pwr(struct nvkm_output_dp *, int);
62int gf119_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *, 62int gf119_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
63 struct nvkm_output **); 63 struct nvkm_output **);
64int gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool); 64int gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool);
65int gf119_sor_dp_drv_ctl(struct nvkm_output_dp *, int, int, int, int);
65 66
66int gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *, 67int gm107_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
67 struct nvkm_output **); 68 struct nvkm_output **);
69int gm107_sor_dp_pattern(struct nvkm_output_dp *, int);
70
71int gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
72 struct nvkm_output **);
68#endif 73#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
index b4b41b135643..22706c0a54b5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
@@ -40,8 +40,7 @@ static int
40gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern) 40gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
41{ 41{
42 struct nvkm_device *device = outp->base.disp->engine.subdev.device; 42 struct nvkm_device *device = outp->base.disp->engine.subdev.device;
43 const u32 loff = gf119_sor_loff(outp); 43 nvkm_mask(device, 0x61c110, 0x0f0f0f0f, 0x01010101 * pattern);
44 nvkm_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
45 return 0; 44 return 0;
46} 45}
47 46
@@ -64,7 +63,7 @@ gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
64 return 0; 63 return 0;
65} 64}
66 65
67static int 66int
68gf119_sor_dp_drv_ctl(struct nvkm_output_dp *outp, 67gf119_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
69 int ln, int vs, int pe, int pc) 68 int ln, int vs, int pe, int pc)
70{ 69{
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
new file mode 100644
index 000000000000..37790b2617c5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
@@ -0,0 +1,53 @@
1/*
2 * Copyright 2016 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "nv50.h"
25#include "outpdp.h"
26
27int
28gm107_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
29{
30 struct nvkm_device *device = outp->base.disp->engine.subdev.device;
31 const u32 soff = outp->base.or * 0x800;
32 const u32 data = 0x01010101 * pattern;
33 if (outp->base.info.sorconf.link & 1)
34 nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data);
35 else
36 nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data);
37 return 0;
38}
39
40static const struct nvkm_output_dp_func
41gm107_sor_dp_func = {
42 .pattern = gm107_sor_dp_pattern,
43 .lnk_pwr = g94_sor_dp_lnk_pwr,
44 .lnk_ctl = gf119_sor_dp_lnk_ctl,
45 .drv_ctl = gf119_sor_dp_drv_ctl,
46};
47
48int
49gm107_sor_dp_new(struct nvkm_disp *disp, int index,
50 struct dcb_output *dcbE, struct nvkm_output **poutp)
51{
52 return nvkm_output_dp_new_(&gm107_sor_dp_func, disp, index, dcbE, poutp);
53}
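gm107_sor_dp_pattern() differs from the gf119 version in two ways: the register is selected per SOR via soff, and the training-pattern field lives in one of two registers depending on which link of the SOR drives the output. A worked example, as a sketch:

        /*
         * SOR1 driving its second link (sorconf.link & 1 == 0):
         *   soff = 1 * 0x800 = 0x800
         *   the pattern is written to 0x61c12c + 0x800 = 0x61c92c
         */

The rest of the function table deliberately reuses the g94/gf119 helpers, which is why gf119_sor_dp_drv_ctl() loses its static qualifier above and gains a declaration in outpdp.h.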
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
index 2cfbef9c344f..c44fa7ea672a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
@@ -57,19 +57,6 @@ gm200_sor_dp_lane_map(struct nvkm_device *device, u8 lane)
57} 57}
58 58
59static int 59static int
60gm200_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
61{
62 struct nvkm_device *device = outp->base.disp->engine.subdev.device;
63 const u32 soff = gm200_sor_soff(outp);
64 const u32 data = 0x01010101 * pattern;
65 if (outp->base.info.sorconf.link & 1)
66 nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data);
67 else
68 nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data);
69 return 0;
70}
71
72static int
73gm200_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr) 60gm200_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
74{ 61{
75 struct nvkm_device *device = outp->base.disp->engine.subdev.device; 62 struct nvkm_device *device = outp->base.disp->engine.subdev.device;
@@ -129,7 +116,7 @@ gm200_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
129 116
130static const struct nvkm_output_dp_func 117static const struct nvkm_output_dp_func
131gm200_sor_dp_func = { 118gm200_sor_dp_func = {
132 .pattern = gm200_sor_dp_pattern, 119 .pattern = gm107_sor_dp_pattern,
133 .lnk_pwr = gm200_sor_dp_lnk_pwr, 120 .lnk_pwr = gm200_sor_dp_lnk_pwr,
134 .lnk_ctl = gf119_sor_dp_lnk_ctl, 121 .lnk_ctl = gf119_sor_dp_lnk_ctl,
135 .drv_ctl = gm200_sor_dp_drv_ctl, 122 .drv_ctl = gm200_sor_dp_drv_ctl,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 9513badb8220..ae9ab5b1ab97 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -949,22 +949,41 @@ gf100_gr_trap_gpc_rop(struct gf100_gr *gr, int gpc)
949} 949}
950 950
951static const struct nvkm_enum gf100_mp_warp_error[] = { 951static const struct nvkm_enum gf100_mp_warp_error[] = {
952 { 0x00, "NO_ERROR" }, 952 { 0x01, "STACK_ERROR" },
953 { 0x01, "STACK_MISMATCH" }, 953 { 0x02, "API_STACK_ERROR" },
954 { 0x03, "RET_EMPTY_STACK_ERROR" },
955 { 0x04, "PC_WRAP" },
954 { 0x05, "MISALIGNED_PC" }, 956 { 0x05, "MISALIGNED_PC" },
955 { 0x08, "MISALIGNED_GPR" }, 957 { 0x06, "PC_OVERFLOW" },
956 { 0x09, "INVALID_OPCODE" }, 958 { 0x07, "MISALIGNED_IMMC_ADDR" },
957 { 0x0d, "GPR_OUT_OF_BOUNDS" }, 959 { 0x08, "MISALIGNED_REG" },
958 { 0x0e, "MEM_OUT_OF_BOUNDS" }, 960 { 0x09, "ILLEGAL_INSTR_ENCODING" },
959 { 0x0f, "UNALIGNED_MEM_ACCESS" }, 961 { 0x0a, "ILLEGAL_SPH_INSTR_COMBO" },
962 { 0x0b, "ILLEGAL_INSTR_PARAM" },
963 { 0x0c, "INVALID_CONST_ADDR" },
964 { 0x0d, "OOR_REG" },
965 { 0x0e, "OOR_ADDR" },
966 { 0x0f, "MISALIGNED_ADDR" },
960 { 0x10, "INVALID_ADDR_SPACE" }, 967 { 0x10, "INVALID_ADDR_SPACE" },
961 { 0x11, "INVALID_PARAM" }, 968 { 0x11, "ILLEGAL_INSTR_PARAM2" },
969 { 0x12, "INVALID_CONST_ADDR_LDC" },
970 { 0x13, "GEOMETRY_SM_ERROR" },
971 { 0x14, "DIVERGENT" },
972 { 0x15, "WARP_EXIT" },
962 {} 973 {}
963}; 974};
964 975
965static const struct nvkm_bitfield gf100_mp_global_error[] = { 976static const struct nvkm_bitfield gf100_mp_global_error[] = {
977 { 0x00000001, "SM_TO_SM_FAULT" },
978 { 0x00000002, "L1_ERROR" },
966 { 0x00000004, "MULTIPLE_WARP_ERRORS" }, 979 { 0x00000004, "MULTIPLE_WARP_ERRORS" },
967 { 0x00000008, "OUT_OF_STACK_SPACE" }, 980 { 0x00000008, "PHYSICAL_STACK_OVERFLOW" },
981 { 0x00000010, "BPT_INT" },
982 { 0x00000020, "BPT_PAUSE" },
983 { 0x00000040, "SINGLE_STEP_COMPLETE" },
984 { 0x20000000, "ECC_SEC_ERROR" },
985 { 0x40000000, "ECC_DED_ERROR" },
986 { 0x80000000, "TIMEOUT" },
968 {} 987 {}
969}; 988};
970 989
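The widened tables cover the warp- and global-error codes the SM can actually report. For reference, a minimal sketch of how gf100_gr's trap handler consumes them, assuming the nvkm core helpers keep their usual signatures (nvkm_enum_find() matches a value against a {}-terminated table, nvkm_snprintbf() renders set bits by name):

        char glob[128];
        const struct nvkm_enum *warp;

        nvkm_snprintbf(glob, sizeof(glob), gf100_mp_global_error, gerr);
        warp = nvkm_enum_find(gf100_mp_warp_error, werr & 0xffff);
        nvkm_error(subdev, "MP trap: global %08x [%s] warp %04x [%s]\n",
                   gerr, glob, werr, warp ? warp->name : "");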
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
index a5e92135cd77..9efb1b48cd54 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
@@ -141,7 +141,8 @@ nvbios_ocfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
141{ 141{
142 u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len); 142 u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len);
143 if (data) { 143 if (data) {
144 info->match = nvbios_rd16(bios, data + 0x00); 144 info->proto = nvbios_rd08(bios, data + 0x00);
145 info->flags = nvbios_rd16(bios, data + 0x01);
145 info->clkcmp[0] = nvbios_rd16(bios, data + 0x02); 146 info->clkcmp[0] = nvbios_rd16(bios, data + 0x02);
146 info->clkcmp[1] = nvbios_rd16(bios, data + 0x04); 147 info->clkcmp[1] = nvbios_rd16(bios, data + 0x04);
147 } 148 }
@@ -149,12 +150,13 @@ nvbios_ocfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
149} 150}
150 151
151u16 152u16
152nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u16 type, 153nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u8 proto, u8 flags,
153 u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *info) 154 u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *info)
154{ 155{
155 u16 data, idx = 0; 156 u16 data, idx = 0;
156 while ((data = nvbios_ocfg_parse(bios, outp, idx++, ver, hdr, cnt, len, info))) { 157 while ((data = nvbios_ocfg_parse(bios, outp, idx++, ver, hdr, cnt, len, info))) {
157 if (info->match == type) 158 if ((info->proto == proto || info->proto == 0xff) &&
159 (info->flags == flags))
158 break; 160 break;
159 } 161 }
160 return data; 162 return data;
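The parse/match pair above splits the old 16-bit match key into its two components. From the reads in nvbios_ocfg_parse(), the entry layout being assumed is:

        /*
         *  +0x00  u8   proto   (0xff acts as a wildcard, matching any proto)
         *  +0x01  u16  flags
         *  +0x02  u16  clkcmp[0]
         *  +0x04  u16  clkcmp[1]
         */

so a VBIOS can carry one catch-all entry per flag combination instead of one entry per protocol, which is exactly what the 0xff test in the match loop accommodates.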
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
index 323c79abe468..41bd5d0f7692 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
@@ -276,6 +276,8 @@ nvkm_iccsense_oneinit(struct nvkm_subdev *subdev)
276 struct pwr_rail_t *r = &stbl.rail[i]; 276 struct pwr_rail_t *r = &stbl.rail[i];
277 struct nvkm_iccsense_rail *rail; 277 struct nvkm_iccsense_rail *rail;
278 struct nvkm_iccsense_sensor *sensor; 278 struct nvkm_iccsense_sensor *sensor;
279 int (*read)(struct nvkm_iccsense *,
280 struct nvkm_iccsense_rail *);
279 281
280 if (!r->mode || r->resistor_mohm == 0) 282 if (!r->mode || r->resistor_mohm == 0)
281 continue; 283 continue;
@@ -284,31 +286,31 @@ nvkm_iccsense_oneinit(struct nvkm_subdev *subdev)
284 if (!sensor) 286 if (!sensor)
285 continue; 287 continue;
286 288
287 rail = kmalloc(sizeof(*rail), GFP_KERNEL);
288 if (!rail)
289 return -ENOMEM;
290
291 switch (sensor->type) { 289 switch (sensor->type) {
292 case NVBIOS_EXTDEV_INA209: 290 case NVBIOS_EXTDEV_INA209:
293 if (r->rail != 0) 291 if (r->rail != 0)
294 continue; 292 continue;
295 rail->read = nvkm_iccsense_ina209_read; 293 read = nvkm_iccsense_ina209_read;
296 break; 294 break;
297 case NVBIOS_EXTDEV_INA219: 295 case NVBIOS_EXTDEV_INA219:
298 if (r->rail != 0) 296 if (r->rail != 0)
299 continue; 297 continue;
300 rail->read = nvkm_iccsense_ina219_read; 298 read = nvkm_iccsense_ina219_read;
301 break; 299 break;
302 case NVBIOS_EXTDEV_INA3221: 300 case NVBIOS_EXTDEV_INA3221:
303 if (r->rail >= 3) 301 if (r->rail >= 3)
304 continue; 302 continue;
305 rail->read = nvkm_iccsense_ina3221_read; 303 read = nvkm_iccsense_ina3221_read;
306 break; 304 break;
307 default: 305 default:
308 continue; 306 continue;
309 } 307 }
310 308
309 rail = kmalloc(sizeof(*rail), GFP_KERNEL);
310 if (!rail)
311 return -ENOMEM;
311 sensor->rail_mask |= 1 << r->rail; 312 sensor->rail_mask |= 1 << r->rail;
313 rail->read = read;
312 rail->sensor = sensor; 314 rail->sensor = sensor;
313 rail->idx = r->rail; 315 rail->idx = r->rail;
314 rail->mohm = r->resistor_mohm; 316 rail->mohm = r->resistor_mohm;
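The reordering here fixes a leak: the rail allocation used to happen before the switch, and every continue taken inside it (unsupported sensor type, out-of-range rail index) dropped one kmalloc'd rail per loop iteration. The shape of the old bug, as a sketch:

        rail = kmalloc(sizeof(*rail), GFP_KERNEL);      /* old order */
        if (!rail)
                return -ENOMEM;
        switch (sensor->type) {
        default:
                continue;       /* rail is never freed */
        }

Selecting the read callback into a local first means nothing is allocated until the entry is known to be usable.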
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
index e292f5679418..389fb13a1998 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
@@ -69,11 +69,11 @@ gm107_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth)
69} 69}
70 70
71static void 71static void
72gm107_ltc_lts_isr(struct nvkm_ltc *ltc, int c, int s) 72gm107_ltc_intr_lts(struct nvkm_ltc *ltc, int c, int s)
73{ 73{
74 struct nvkm_subdev *subdev = &ltc->subdev; 74 struct nvkm_subdev *subdev = &ltc->subdev;
75 struct nvkm_device *device = subdev->device; 75 struct nvkm_device *device = subdev->device;
76 u32 base = 0x140000 + (c * 0x2000) + (s * 0x200); 76 u32 base = 0x140400 + (c * 0x2000) + (s * 0x200);
77 u32 stat = nvkm_rd32(device, base + 0x00c); 77 u32 stat = nvkm_rd32(device, base + 0x00c);
78 78
79 if (stat) { 79 if (stat) {
@@ -92,7 +92,7 @@ gm107_ltc_intr(struct nvkm_ltc *ltc)
92 while (mask) { 92 while (mask) {
93 u32 s, c = __ffs(mask); 93 u32 s, c = __ffs(mask);
94 for (s = 0; s < ltc->lts_nr; s++) 94 for (s = 0; s < ltc->lts_nr; s++)
95 gm107_ltc_lts_isr(ltc, c, s); 95 gm107_ltc_intr_lts(ltc, c, s);
96 mask &= ~(1 << c); 96 mask &= ~(1 << c);
97 } 97 }
98} 98}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c
index 2a29bfd5125a..e18e0dc19ec8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c
@@ -46,7 +46,7 @@ static const struct nvkm_ltc_func
46gm200_ltc = { 46gm200_ltc = {
47 .oneinit = gm200_ltc_oneinit, 47 .oneinit = gm200_ltc_oneinit,
48 .init = gm200_ltc_init, 48 .init = gm200_ltc_init,
49 .intr = gm107_ltc_intr, /*XXX: not validated */ 49 .intr = gm107_ltc_intr,
50 .cbc_clear = gm107_ltc_cbc_clear, 50 .cbc_clear = gm107_ltc_cbc_clear,
51 .cbc_wait = gm107_ltc_cbc_wait, 51 .cbc_wait = gm107_ltc_cbc_wait,
52 .zbc = 16, 52 .zbc = 16,
diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
index 73241c4eb7aa..336ad4de9981 100644
--- a/drivers/gpu/drm/omapdrm/Kconfig
+++ b/drivers/gpu/drm/omapdrm/Kconfig
@@ -2,6 +2,7 @@ config DRM_OMAP
2 tristate "OMAP DRM" 2 tristate "OMAP DRM"
3 depends on DRM 3 depends on DRM
4 depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM 4 depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
5 select OMAP2_DSS
5 select DRM_KMS_HELPER 6 select DRM_KMS_HELPER
6 select DRM_KMS_FB_HELPER 7 select DRM_KMS_FB_HELPER
7 select FB_SYS_FILLRECT 8 select FB_SYS_FILLRECT
diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig
index 2a618afe0f53..c226da145fb3 100644
--- a/drivers/gpu/drm/omapdrm/displays/Kconfig
+++ b/drivers/gpu/drm/omapdrm/displays/Kconfig
@@ -1,80 +1,80 @@
1menu "OMAPDRM External Display Device Drivers" 1menu "OMAPDRM External Display Device Drivers"
2 2
3config DISPLAY_ENCODER_OPA362 3config DRM_OMAP_ENCODER_OPA362
4 tristate "OPA362 external analog amplifier" 4 tristate "OPA362 external analog amplifier"
5 help 5 help
6 Driver for OPA362 external analog TV amplifier controlled 6 Driver for OPA362 external analog TV amplifier controlled
7 through a GPIO. 7 through a GPIO.
8 8
9config DISPLAY_ENCODER_TFP410 9config DRM_OMAP_ENCODER_TFP410
10 tristate "TFP410 DPI to DVI Encoder" 10 tristate "TFP410 DPI to DVI Encoder"
11 help 11 help
12 Driver for TFP410 DPI to DVI encoder. 12 Driver for TFP410 DPI to DVI encoder.
13 13
14config DISPLAY_ENCODER_TPD12S015 14config DRM_OMAP_ENCODER_TPD12S015
15 tristate "TPD12S015 HDMI ESD protection and level shifter" 15 tristate "TPD12S015 HDMI ESD protection and level shifter"
16 help 16 help
17 Driver for TPD12S015, which offers HDMI ESD protection and level 17 Driver for TPD12S015, which offers HDMI ESD protection and level
18 shifting. 18 shifting.
19 19
20config DISPLAY_CONNECTOR_DVI 20config DRM_OMAP_CONNECTOR_DVI
21 tristate "DVI Connector" 21 tristate "DVI Connector"
22 depends on I2C 22 depends on I2C
23 help 23 help
24 Driver for a generic DVI connector. 24 Driver for a generic DVI connector.
25 25
26config DISPLAY_CONNECTOR_HDMI 26config DRM_OMAP_CONNECTOR_HDMI
27 tristate "HDMI Connector" 27 tristate "HDMI Connector"
28 help 28 help
29 Driver for a generic HDMI connector. 29 Driver for a generic HDMI connector.
30 30
31config DISPLAY_CONNECTOR_ANALOG_TV 31config DRM_OMAP_CONNECTOR_ANALOG_TV
32 tristate "Analog TV Connector" 32 tristate "Analog TV Connector"
33 help 33 help
34 Driver for a generic analog TV connector. 34 Driver for a generic analog TV connector.
35 35
36config DISPLAY_PANEL_DPI 36config DRM_OMAP_PANEL_DPI
37 tristate "Generic DPI panel" 37 tristate "Generic DPI panel"
38 help 38 help
39 Driver for generic DPI panels. 39 Driver for generic DPI panels.
40 40
41config DISPLAY_PANEL_DSI_CM 41config DRM_OMAP_PANEL_DSI_CM
42 tristate "Generic DSI Command Mode Panel" 42 tristate "Generic DSI Command Mode Panel"
43 depends on BACKLIGHT_CLASS_DEVICE 43 depends on BACKLIGHT_CLASS_DEVICE
44 help 44 help
45 Driver for generic DSI command mode panels. 45 Driver for generic DSI command mode panels.
46 46
47config DISPLAY_PANEL_SONY_ACX565AKM 47config DRM_OMAP_PANEL_SONY_ACX565AKM
48 tristate "ACX565AKM Panel" 48 tristate "ACX565AKM Panel"
49 depends on SPI && BACKLIGHT_CLASS_DEVICE 49 depends on SPI && BACKLIGHT_CLASS_DEVICE
50 help 50 help
51 This is the LCD panel used on Nokia N900 51 This is the LCD panel used on Nokia N900
52 52
53config DISPLAY_PANEL_LGPHILIPS_LB035Q02 53config DRM_OMAP_PANEL_LGPHILIPS_LB035Q02
54 tristate "LG.Philips LB035Q02 LCD Panel" 54 tristate "LG.Philips LB035Q02 LCD Panel"
55 depends on SPI 55 depends on SPI
56 help 56 help
57 LCD Panel used on the Gumstix Overo Palo35 57 LCD Panel used on the Gumstix Overo Palo35
58 58
59config DISPLAY_PANEL_SHARP_LS037V7DW01 59config DRM_OMAP_PANEL_SHARP_LS037V7DW01
60 tristate "Sharp LS037V7DW01 LCD Panel" 60 tristate "Sharp LS037V7DW01 LCD Panel"
61 depends on BACKLIGHT_CLASS_DEVICE 61 depends on BACKLIGHT_CLASS_DEVICE
62 help 62 help
63 LCD Panel used in TI's SDP3430 and EVM boards 63 LCD Panel used in TI's SDP3430 and EVM boards
64 64
65config DISPLAY_PANEL_TPO_TD028TTEC1 65config DRM_OMAP_PANEL_TPO_TD028TTEC1
66 tristate "TPO TD028TTEC1 LCD Panel" 66 tristate "TPO TD028TTEC1 LCD Panel"
67 depends on SPI 67 depends on SPI
68 help 68 help
69 LCD panel used in Openmoko. 69 LCD panel used in Openmoko.
70 70
71config DISPLAY_PANEL_TPO_TD043MTEA1 71config DRM_OMAP_PANEL_TPO_TD043MTEA1
72 tristate "TPO TD043MTEA1 LCD Panel" 72 tristate "TPO TD043MTEA1 LCD Panel"
73 depends on SPI 73 depends on SPI
74 help 74 help
75 LCD Panel used in OMAP3 Pandora 75 LCD Panel used in OMAP3 Pandora
76 76
77config DISPLAY_PANEL_NEC_NL8048HL11 77config DRM_OMAP_PANEL_NEC_NL8048HL11
78 tristate "NEC NL8048HL11 Panel" 78 tristate "NEC NL8048HL11 Panel"
79 depends on SPI 79 depends on SPI
80 depends on BACKLIGHT_CLASS_DEVICE 80 depends on BACKLIGHT_CLASS_DEVICE
diff --git a/drivers/gpu/drm/omapdrm/displays/Makefile b/drivers/gpu/drm/omapdrm/displays/Makefile
index 9aa176bfbf2e..46baafb1a83e 100644
--- a/drivers/gpu/drm/omapdrm/displays/Makefile
+++ b/drivers/gpu/drm/omapdrm/displays/Makefile
@@ -1,14 +1,14 @@
1obj-$(CONFIG_DISPLAY_ENCODER_OPA362) += encoder-opa362.o 1obj-$(CONFIG_DRM_OMAP_ENCODER_OPA362) += encoder-opa362.o
2obj-$(CONFIG_DISPLAY_ENCODER_TFP410) += encoder-tfp410.o 2obj-$(CONFIG_DRM_OMAP_ENCODER_TFP410) += encoder-tfp410.o
3obj-$(CONFIG_DISPLAY_ENCODER_TPD12S015) += encoder-tpd12s015.o 3obj-$(CONFIG_DRM_OMAP_ENCODER_TPD12S015) += encoder-tpd12s015.o
4obj-$(CONFIG_DISPLAY_CONNECTOR_DVI) += connector-dvi.o 4obj-$(CONFIG_DRM_OMAP_CONNECTOR_DVI) += connector-dvi.o
5obj-$(CONFIG_DISPLAY_CONNECTOR_HDMI) += connector-hdmi.o 5obj-$(CONFIG_DRM_OMAP_CONNECTOR_HDMI) += connector-hdmi.o
6obj-$(CONFIG_DISPLAY_CONNECTOR_ANALOG_TV) += connector-analog-tv.o 6obj-$(CONFIG_DRM_OMAP_CONNECTOR_ANALOG_TV) += connector-analog-tv.o
7obj-$(CONFIG_DISPLAY_PANEL_DPI) += panel-dpi.o 7obj-$(CONFIG_DRM_OMAP_PANEL_DPI) += panel-dpi.o
8obj-$(CONFIG_DISPLAY_PANEL_DSI_CM) += panel-dsi-cm.o 8obj-$(CONFIG_DRM_OMAP_PANEL_DSI_CM) += panel-dsi-cm.o
9obj-$(CONFIG_DISPLAY_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o 9obj-$(CONFIG_DRM_OMAP_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
10obj-$(CONFIG_DISPLAY_PANEL_LGPHILIPS_LB035Q02) += panel-lgphilips-lb035q02.o 10obj-$(CONFIG_DRM_OMAP_PANEL_LGPHILIPS_LB035Q02) += panel-lgphilips-lb035q02.o
11obj-$(CONFIG_DISPLAY_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o 11obj-$(CONFIG_DRM_OMAP_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o
12obj-$(CONFIG_DISPLAY_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o 12obj-$(CONFIG_DRM_OMAP_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o
13obj-$(CONFIG_DISPLAY_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o 13obj-$(CONFIG_DRM_OMAP_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
14obj-$(CONFIG_DISPLAY_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o 14obj-$(CONFIG_DRM_OMAP_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
index 8511c648a15c..3485d1ecd655 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
@@ -14,9 +14,10 @@
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15#include <linux/of.h> 15#include <linux/of.h>
16 16
17#include <video/omapdss.h>
18#include <video/omap-panel-data.h> 17#include <video/omap-panel-data.h>
19 18
19#include "../dss/omapdss.h"
20
20struct panel_drv_data { 21struct panel_drv_data {
21 struct omap_dss_device dssdev; 22 struct omap_dss_device dssdev;
22 struct omap_dss_device *in; 23 struct omap_dss_device *in;
@@ -25,7 +26,6 @@ struct panel_drv_data {
25 26
26 struct omap_video_timings timings; 27 struct omap_video_timings timings;
27 28
28 enum omap_dss_venc_type connector_type;
29 bool invert_polarity; 29 bool invert_polarity;
30}; 30};
31 31
@@ -45,10 +45,6 @@ static const struct omap_video_timings tvc_pal_timings = {
45 45
46static const struct of_device_id tvc_of_match[]; 46static const struct of_device_id tvc_of_match[];
47 47
48struct tvc_of_data {
49 enum omap_dss_venc_type connector_type;
50};
51
52#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) 48#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
53 49
54static int tvc_connect(struct omap_dss_device *dssdev) 50static int tvc_connect(struct omap_dss_device *dssdev)
@@ -99,7 +95,7 @@ static int tvc_enable(struct omap_dss_device *dssdev)
99 in->ops.atv->set_timings(in, &ddata->timings); 95 in->ops.atv->set_timings(in, &ddata->timings);
100 96
101 if (!ddata->dev->of_node) { 97 if (!ddata->dev->of_node) {
102 in->ops.atv->set_type(in, ddata->connector_type); 98 in->ops.atv->set_type(in, OMAP_DSS_VENC_TYPE_COMPOSITE);
103 99
104 in->ops.atv->invert_vid_out_polarity(in, 100 in->ops.atv->invert_vid_out_polarity(in,
105 ddata->invert_polarity); 101 ddata->invert_polarity);
@@ -207,7 +203,6 @@ static int tvc_probe_pdata(struct platform_device *pdev)
207 203
208 ddata->in = in; 204 ddata->in = in;
209 205
210 ddata->connector_type = pdata->connector_type;
211 ddata->invert_polarity = pdata->invert_polarity; 206 ddata->invert_polarity = pdata->invert_polarity;
212 207
213 dssdev = &ddata->dssdev; 208 dssdev = &ddata->dssdev;
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
index 747f26a55e43..75f7827525cf 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
@@ -15,10 +15,10 @@
15#include <linux/slab.h> 15#include <linux/slab.h>
16 16
17#include <drm/drm_edid.h> 17#include <drm/drm_edid.h>
18
19#include <video/omapdss.h>
20#include <video/omap-panel-data.h> 18#include <video/omap-panel-data.h>
21 19
20#include "../dss/omapdss.h"
21
22static const struct omap_video_timings dvic_default_timings = { 22static const struct omap_video_timings dvic_default_timings = {
23 .x_res = 640, 23 .x_res = 640,
24 .y_res = 480, 24 .y_res = 480,
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
index 225fd8d6ab31..7bdf83af9797 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
@@ -9,6 +9,7 @@
9 * the Free Software Foundation. 9 * the Free Software Foundation.
10 */ 10 */
11 11
12#include <linux/gpio/consumer.h>
12#include <linux/slab.h> 13#include <linux/slab.h>
13#include <linux/module.h> 14#include <linux/module.h>
14#include <linux/platform_device.h> 15#include <linux/platform_device.h>
@@ -16,10 +17,10 @@
16#include <linux/of_gpio.h> 17#include <linux/of_gpio.h>
17 18
18#include <drm/drm_edid.h> 19#include <drm/drm_edid.h>
19
20#include <video/omapdss.h>
21#include <video/omap-panel-data.h> 20#include <video/omap-panel-data.h>
22 21
22#include "../dss/omapdss.h"
23
23static const struct omap_video_timings hdmic_default_timings = { 24static const struct omap_video_timings hdmic_default_timings = {
24 .x_res = 640, 25 .x_res = 640,
25 .y_res = 480, 26 .y_res = 480,
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
index 8c246c213e06..fe4e7ec3bab0 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
@@ -14,13 +14,12 @@
14 * the Free Software Foundation. 14 * the Free Software Foundation.
15 */ 15 */
16 16
17#include <linux/gpio.h> 17#include <linux/gpio/consumer.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/of_gpio.h>
22 21
23#include <video/omapdss.h> 22#include "../dss/omapdss.h"
24 23
25struct panel_drv_data { 24struct panel_drv_data {
26 struct omap_dss_device dssdev; 25 struct omap_dss_device dssdev;
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
index 2fd5602880a7..d768217cefe0 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
@@ -9,14 +9,13 @@
9 * the Free Software Foundation. 9 * the Free Software Foundation.
10 */ 10 */
11 11
12#include <linux/gpio.h> 12#include <linux/gpio/consumer.h>
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/of_gpio.h> 16#include <linux/of_gpio.h>
17 17
18#include <video/omapdss.h> 18#include "../dss/omapdss.h"
19#include <video/omap-panel-data.h>
20 19
21struct panel_drv_data { 20struct panel_drv_data {
22 struct omap_dss_device dssdev; 21 struct omap_dss_device dssdev;
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
index 916a89978387..46855c8f5cbf 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
@@ -16,8 +16,7 @@
16#include <linux/platform_device.h> 16#include <linux/platform_device.h>
17#include <linux/gpio/consumer.h> 17#include <linux/gpio/consumer.h>
18 18
19#include <video/omapdss.h> 19#include "../dss/omapdss.h"
20#include <video/omap-panel-data.h>
21 20
22struct panel_drv_data { 21struct panel_drv_data {
23 struct omap_dss_device dssdev; 22 struct omap_dss_device dssdev;
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
index e780fd4f8b46..7f16f985ab22 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
@@ -9,17 +9,19 @@
9 * the Free Software Foundation. 9 * the Free Software Foundation.
10 */ 10 */
11 11
12#include <linux/gpio.h> 12#include <linux/gpio/consumer.h>
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/of.h> 16#include <linux/of.h>
17#include <linux/of_gpio.h> 17#include <linux/of_gpio.h>
18#include <linux/regulator/consumer.h>
18 19
19#include <video/omapdss.h>
20#include <video/omap-panel-data.h> 20#include <video/omap-panel-data.h>
21#include <video/of_display_timing.h> 21#include <video/of_display_timing.h>
22 22
23#include "../dss/omapdss.h"
24
23struct panel_drv_data { 25struct panel_drv_data {
24 struct omap_dss_device dssdev; 26 struct omap_dss_device dssdev;
25 struct omap_dss_device *in; 27 struct omap_dss_device *in;
@@ -32,6 +34,7 @@ struct panel_drv_data {
32 int backlight_gpio; 34 int backlight_gpio;
33 35
34 struct gpio_desc *enable_gpio; 36 struct gpio_desc *enable_gpio;
37 struct regulator *vcc_supply;
35}; 38};
36 39
37#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev) 40#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
@@ -83,6 +86,12 @@ static int panel_dpi_enable(struct omap_dss_device *dssdev)
83 if (r) 86 if (r)
84 return r; 87 return r;
85 88
89 r = regulator_enable(ddata->vcc_supply);
90 if (r) {
91 in->ops.dpi->disable(in);
92 return r;
93 }
94
86 gpiod_set_value_cansleep(ddata->enable_gpio, 1); 95 gpiod_set_value_cansleep(ddata->enable_gpio, 1);
87 96
88 if (gpio_is_valid(ddata->backlight_gpio)) 97 if (gpio_is_valid(ddata->backlight_gpio))
@@ -105,6 +114,7 @@ static void panel_dpi_disable(struct omap_dss_device *dssdev)
105 gpio_set_value_cansleep(ddata->backlight_gpio, 0); 114 gpio_set_value_cansleep(ddata->backlight_gpio, 0);
106 115
107 gpiod_set_value_cansleep(ddata->enable_gpio, 0); 116 gpiod_set_value_cansleep(ddata->enable_gpio, 0);
117 regulator_disable(ddata->vcc_supply);
108 118
109 in->ops.dpi->disable(in); 119 in->ops.dpi->disable(in);
110 120
@@ -213,6 +223,20 @@ static int panel_dpi_probe_of(struct platform_device *pdev)
213 223
214 ddata->enable_gpio = gpio; 224 ddata->enable_gpio = gpio;
215 225
 226 /*
 227 * Many different panels are supported by this driver, and they likely
 228 * have very different requirements for their reset pins with regard
 229 * to timing and ordering relative to the enable GPIO. So for now we
 230 * just ensure that the reset line isn't active.
 231 */
232 gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
233 if (IS_ERR(gpio))
234 return PTR_ERR(gpio);
235
236 ddata->vcc_supply = devm_regulator_get(&pdev->dev, "vcc");
237 if (IS_ERR(ddata->vcc_supply))
238 return PTR_ERR(ddata->vcc_supply);
239
216 ddata->backlight_gpio = -ENOENT; 240 ddata->backlight_gpio = -ENOENT;
217 241
218 r = of_get_display_timing(node, "panel-timing", &timing); 242 r = of_get_display_timing(node, "panel-timing", &timing);
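Worth noting for the reset handling above: devm_gpiod_get_optional() returns NULL rather than an error when no "reset" property exists, so boards without a reset line take exactly the same path; only a genuine lookup failure propagates out of probe. A sketch:

        gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
        if (IS_ERR(gpio))
                return PTR_ERR(gpio);   /* real error or -EPROBE_DEFER */
        /* gpio == NULL here simply means "no reset line" */

GPIOD_OUT_LOW also requests the line in its logically inactive state, which is all the comment in the diff promises.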
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
index 36485c2137ce..1b0cf2d8224b 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
@@ -14,7 +14,7 @@
14#include <linux/backlight.h> 14#include <linux/backlight.h>
15#include <linux/delay.h> 15#include <linux/delay.h>
16#include <linux/fb.h> 16#include <linux/fb.h>
17#include <linux/gpio.h> 17#include <linux/gpio/consumer.h>
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/jiffies.h> 19#include <linux/jiffies.h>
20#include <linux/module.h> 20#include <linux/module.h>
@@ -25,10 +25,10 @@
25#include <linux/of_device.h> 25#include <linux/of_device.h>
26#include <linux/of_gpio.h> 26#include <linux/of_gpio.h>
27 27
28#include <video/omapdss.h>
29#include <video/omap-panel-data.h>
30#include <video/mipi_display.h> 28#include <video/mipi_display.h>
31 29
30#include "../dss/omapdss.h"
31
32/* DSI Virtual channel. Hardcoded for now. */ 32/* DSI Virtual channel. Hardcoded for now. */
33#define TCH 0 33#define TCH 0
34 34
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
index 458f77bc473d..6dfb96cea293 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
@@ -15,9 +15,9 @@
15#include <linux/spi/spi.h> 15#include <linux/spi/spi.h>
16#include <linux/mutex.h> 16#include <linux/mutex.h>
17#include <linux/gpio.h> 17#include <linux/gpio.h>
18#include <linux/gpio/consumer.h>
18 19
19#include <video/omapdss.h> 20#include "../dss/omapdss.h"
20#include <video/omap-panel-data.h>
21 21
22static struct omap_video_timings lb035q02_timings = { 22static struct omap_video_timings lb035q02_timings = {
23 .x_res = 320, 23 .x_res = 320,
@@ -50,9 +50,6 @@ struct panel_drv_data {
50 50
51 struct omap_video_timings videomode; 51 struct omap_video_timings videomode;
52 52
53 /* used for non-DT boot, to be removed */
54 int backlight_gpio;
55
56 struct gpio_desc *enable_gpio; 53 struct gpio_desc *enable_gpio;
57}; 54};
58 55
@@ -170,9 +167,6 @@ static int lb035q02_enable(struct omap_dss_device *dssdev)
170 if (ddata->enable_gpio) 167 if (ddata->enable_gpio)
171 gpiod_set_value_cansleep(ddata->enable_gpio, 1); 168 gpiod_set_value_cansleep(ddata->enable_gpio, 1);
172 169
173 if (gpio_is_valid(ddata->backlight_gpio))
174 gpio_set_value_cansleep(ddata->backlight_gpio, 1);
175
176 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; 170 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
177 171
178 return 0; 172 return 0;
@@ -189,9 +183,6 @@ static void lb035q02_disable(struct omap_dss_device *dssdev)
189 if (ddata->enable_gpio) 183 if (ddata->enable_gpio)
190 gpiod_set_value_cansleep(ddata->enable_gpio, 0); 184 gpiod_set_value_cansleep(ddata->enable_gpio, 0);
191 185
192 if (gpio_is_valid(ddata->backlight_gpio))
193 gpio_set_value_cansleep(ddata->backlight_gpio, 0);
194
195 in->ops.dpi->disable(in); 186 in->ops.dpi->disable(in);
196 187
197 dssdev->state = OMAP_DSS_DISPLAY_DISABLED; 188 dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
@@ -255,8 +246,6 @@ static int lb035q02_probe_of(struct spi_device *spi)
255 246
256 ddata->enable_gpio = gpio; 247 ddata->enable_gpio = gpio;
257 248
258 ddata->backlight_gpio = -ENOENT;
259
260 in = omapdss_of_find_source_for_first_ep(node); 249 in = omapdss_of_find_source_for_first_ep(node);
261 if (IS_ERR(in)) { 250 if (IS_ERR(in)) {
262 dev_err(&spi->dev, "failed to find video source\n"); 251 dev_err(&spi->dev, "failed to find video source\n");
@@ -289,13 +278,6 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
289 if (r) 278 if (r)
290 return r; 279 return r;
291 280
292 if (gpio_is_valid(ddata->backlight_gpio)) {
293 r = devm_gpio_request_one(&spi->dev, ddata->backlight_gpio,
294 GPIOF_OUT_INIT_LOW, "panel backlight");
295 if (r)
296 goto err_gpio;
297 }
298
299 ddata->videomode = lb035q02_timings; 281 ddata->videomode = lb035q02_timings;
300 282
301 dssdev = &ddata->dssdev; 283 dssdev = &ddata->dssdev;
@@ -315,7 +297,6 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
315 return 0; 297 return 0;
316 298
317err_reg: 299err_reg:
318err_gpio:
319 omap_dss_put_device(ddata->in); 300 omap_dss_put_device(ddata->in);
320 return r; 301 return r;
321} 302}
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
index 780cb263a318..fc4c238c9583 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
@@ -15,10 +15,10 @@
15#include <linux/delay.h> 15#include <linux/delay.h>
16#include <linux/spi/spi.h> 16#include <linux/spi/spi.h>
17#include <linux/fb.h> 17#include <linux/fb.h>
18#include <linux/gpio.h> 18#include <linux/gpio/consumer.h>
19#include <linux/of_gpio.h> 19#include <linux/of_gpio.h>
20 20
21#include <video/omapdss.h> 21#include "../dss/omapdss.h"
22 22
23struct panel_drv_data { 23struct panel_drv_data {
24 struct omap_dss_device dssdev; 24 struct omap_dss_device dssdev;
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
index 529a017602e4..3d3efc561ea9 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
@@ -10,14 +10,14 @@
10 */ 10 */
11 11
12#include <linux/delay.h> 12#include <linux/delay.h>
13#include <linux/gpio.h> 13#include <linux/gpio/consumer.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/of.h> 15#include <linux/of.h>
16#include <linux/of_gpio.h>
17#include <linux/platform_device.h> 16#include <linux/platform_device.h>
18#include <linux/slab.h> 17#include <linux/slab.h>
19#include <linux/regulator/consumer.h> 18#include <linux/regulator/consumer.h>
20#include <video/omapdss.h> 19
20#include "../dss/omapdss.h"
21 21
22struct panel_drv_data { 22struct panel_drv_data {
23 struct omap_dss_device dssdev; 23 struct omap_dss_device dssdev;
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
index 31efcca801bd..157c512205d1 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
@@ -29,13 +29,14 @@
29#include <linux/sched.h> 29#include <linux/sched.h>
30#include <linux/backlight.h> 30#include <linux/backlight.h>
31#include <linux/fb.h> 31#include <linux/fb.h>
32#include <linux/gpio.h> 32#include <linux/gpio/consumer.h>
33#include <linux/of.h> 33#include <linux/of.h>
34#include <linux/of_gpio.h> 34#include <linux/of_gpio.h>
35 35
36#include <video/omapdss.h>
37#include <video/omap-panel-data.h> 36#include <video/omap-panel-data.h>
38 37
38#include "../dss/omapdss.h"
39
39#define MIPID_CMD_READ_DISP_ID 0x04 40#define MIPID_CMD_READ_DISP_ID 0x04
40#define MIPID_CMD_READ_RED 0x06 41#define MIPID_CMD_READ_RED 0x06
41#define MIPID_CMD_READ_GREEN 0x07 42#define MIPID_CMD_READ_GREEN 0x07
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
index bd8d85041926..e859b3f893f7 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
@@ -28,7 +28,8 @@
28#include <linux/delay.h> 28#include <linux/delay.h>
29#include <linux/spi/spi.h> 29#include <linux/spi/spi.h>
30#include <linux/gpio.h> 30#include <linux/gpio.h>
31#include <video/omapdss.h> 31
32#include "../dss/omapdss.h"
32 33
33struct panel_drv_data { 34struct panel_drv_data {
34 struct omap_dss_device dssdev; 35 struct omap_dss_device dssdev;
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
index 03e2beb7b4f0..66c6bbe6472b 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
@@ -14,12 +14,12 @@
14#include <linux/delay.h> 14#include <linux/delay.h>
15#include <linux/spi/spi.h> 15#include <linux/spi/spi.h>
16#include <linux/regulator/consumer.h> 16#include <linux/regulator/consumer.h>
17#include <linux/gpio.h> 17#include <linux/gpio/consumer.h>
18#include <linux/err.h> 18#include <linux/err.h>
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/of_gpio.h> 20#include <linux/of_gpio.h>
21 21
22#include <video/omapdss.h> 22#include "../dss/omapdss.h"
23 23
24#define TPO_R02_MODE(x) ((x) & 7) 24#define TPO_R02_MODE(x) ((x) & 7)
25#define TPO_R02_MODE_800x480 7 25#define TPO_R02_MODE_800x480 7
diff --git a/drivers/gpu/drm/omapdrm/dss/core.c b/drivers/gpu/drm/omapdrm/dss/core.c
index 7e4e5bebabbe..6a3ebfcd7223 100644
--- a/drivers/gpu/drm/omapdrm/dss/core.c
+++ b/drivers/gpu/drm/omapdrm/dss/core.c
@@ -35,8 +35,7 @@
35#include <linux/suspend.h> 35#include <linux/suspend.h>
36#include <linux/slab.h> 36#include <linux/slab.h>
37 37
38#include <video/omapdss.h> 38#include "omapdss.h"
39
40#include "dss.h" 39#include "dss.h"
41#include "dss_features.h" 40#include "dss_features.h"
42 41
@@ -196,8 +195,6 @@ static int __init omap_dss_probe(struct platform_device *pdev)
196 core.default_display_name = def_disp_name; 195 core.default_display_name = def_disp_name;
197 else if (pdata->default_display_name) 196 else if (pdata->default_display_name)
198 core.default_display_name = pdata->default_display_name; 197 core.default_display_name = pdata->default_display_name;
199 else if (pdata->default_device)
200 core.default_display_name = pdata->default_device->name;
201 198
202 return 0; 199 return 0;
203 200
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index f83608b69e68..535240fba671 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -41,8 +41,7 @@
41#include <linux/of.h> 41#include <linux/of.h>
42#include <linux/component.h> 42#include <linux/component.h>
43 43
44#include <video/omapdss.h> 44#include "omapdss.h"
45
46#include "dss.h" 45#include "dss.h"
47#include "dss_features.h" 46#include "dss_features.h"
48#include "dispc.h" 47#include "dispc.h"
@@ -113,9 +112,14 @@ struct dispc_features {
113 * never both, we can just use this flag for now. 112 * never both, we can just use this flag for now.
114 */ 113 */
115 bool reverse_ilace_field_order:1; 114 bool reverse_ilace_field_order:1;
115
116 bool has_gamma_table:1;
117
118 bool has_gamma_i734_bug:1;
116}; 119};
117 120
118#define DISPC_MAX_NR_FIFOS 5 121#define DISPC_MAX_NR_FIFOS 5
122#define DISPC_MAX_CHANNEL_GAMMA 4
119 123
120static struct { 124static struct {
121 struct platform_device *pdev; 125 struct platform_device *pdev;
@@ -135,6 +139,8 @@ static struct {
135 bool ctx_valid; 139 bool ctx_valid;
136 u32 ctx[DISPC_SZ_REGS / sizeof(u32)]; 140 u32 ctx[DISPC_SZ_REGS / sizeof(u32)];
137 141
142 u32 *gamma_table[DISPC_MAX_CHANNEL_GAMMA];
143
138 const struct dispc_features *feat; 144 const struct dispc_features *feat;
139 145
140 bool is_enabled; 146 bool is_enabled;
@@ -178,11 +184,19 @@ struct dispc_reg_field {
178 u8 low; 184 u8 low;
179}; 185};
180 186
187struct dispc_gamma_desc {
188 u32 len;
189 u32 bits;
190 u16 reg;
191 bool has_index;
192};
193
181static const struct { 194static const struct {
182 const char *name; 195 const char *name;
183 u32 vsync_irq; 196 u32 vsync_irq;
184 u32 framedone_irq; 197 u32 framedone_irq;
185 u32 sync_lost_irq; 198 u32 sync_lost_irq;
199 struct dispc_gamma_desc gamma;
186 struct dispc_reg_field reg_desc[DISPC_MGR_FLD_NUM]; 200 struct dispc_reg_field reg_desc[DISPC_MGR_FLD_NUM];
187} mgr_desc[] = { 201} mgr_desc[] = {
188 [OMAP_DSS_CHANNEL_LCD] = { 202 [OMAP_DSS_CHANNEL_LCD] = {
@@ -190,6 +204,12 @@ static const struct {
190 .vsync_irq = DISPC_IRQ_VSYNC, 204 .vsync_irq = DISPC_IRQ_VSYNC,
191 .framedone_irq = DISPC_IRQ_FRAMEDONE, 205 .framedone_irq = DISPC_IRQ_FRAMEDONE,
192 .sync_lost_irq = DISPC_IRQ_SYNC_LOST, 206 .sync_lost_irq = DISPC_IRQ_SYNC_LOST,
207 .gamma = {
208 .len = 256,
209 .bits = 8,
210 .reg = DISPC_GAMMA_TABLE0,
211 .has_index = true,
212 },
193 .reg_desc = { 213 .reg_desc = {
194 [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL, 0, 0 }, 214 [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL, 0, 0 },
195 [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL, 3, 3 }, 215 [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL, 3, 3 },
@@ -207,6 +227,12 @@ static const struct {
207 .vsync_irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN, 227 .vsync_irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN,
208 .framedone_irq = DISPC_IRQ_FRAMEDONETV, 228 .framedone_irq = DISPC_IRQ_FRAMEDONETV,
209 .sync_lost_irq = DISPC_IRQ_SYNC_LOST_DIGIT, 229 .sync_lost_irq = DISPC_IRQ_SYNC_LOST_DIGIT,
230 .gamma = {
231 .len = 1024,
232 .bits = 10,
233 .reg = DISPC_GAMMA_TABLE2,
234 .has_index = false,
235 },
210 .reg_desc = { 236 .reg_desc = {
211 [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL, 1, 1 }, 237 [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL, 1, 1 },
212 [DISPC_MGR_FLD_STNTFT] = { }, 238 [DISPC_MGR_FLD_STNTFT] = { },
@@ -224,6 +250,12 @@ static const struct {
224 .vsync_irq = DISPC_IRQ_VSYNC2, 250 .vsync_irq = DISPC_IRQ_VSYNC2,
225 .framedone_irq = DISPC_IRQ_FRAMEDONE2, 251 .framedone_irq = DISPC_IRQ_FRAMEDONE2,
226 .sync_lost_irq = DISPC_IRQ_SYNC_LOST2, 252 .sync_lost_irq = DISPC_IRQ_SYNC_LOST2,
253 .gamma = {
254 .len = 256,
255 .bits = 8,
256 .reg = DISPC_GAMMA_TABLE1,
257 .has_index = true,
258 },
227 .reg_desc = { 259 .reg_desc = {
228 [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL2, 0, 0 }, 260 [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL2, 0, 0 },
229 [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL2, 3, 3 }, 261 [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL2, 3, 3 },
@@ -241,6 +273,12 @@ static const struct {
241 .vsync_irq = DISPC_IRQ_VSYNC3, 273 .vsync_irq = DISPC_IRQ_VSYNC3,
242 .framedone_irq = DISPC_IRQ_FRAMEDONE3, 274 .framedone_irq = DISPC_IRQ_FRAMEDONE3,
243 .sync_lost_irq = DISPC_IRQ_SYNC_LOST3, 275 .sync_lost_irq = DISPC_IRQ_SYNC_LOST3,
276 .gamma = {
277 .len = 256,
278 .bits = 8,
279 .reg = DISPC_GAMMA_TABLE3,
280 .has_index = true,
281 },
244 .reg_desc = { 282 .reg_desc = {
245 [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL3, 0, 0 }, 283 [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL3, 0, 0 },
246 [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL3, 3, 3 }, 284 [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL3, 3, 3 },
@@ -1084,20 +1122,6 @@ static u32 dispc_ovl_get_burst_size(enum omap_plane plane)
1084 return unit * 8; 1122 return unit * 8;
1085} 1123}
1086 1124
1087void dispc_enable_gamma_table(bool enable)
1088{
1089 /*
1090 * This is partially implemented to support only disabling of
1091 * the gamma table.
1092 */
1093 if (enable) {
1094 DSSWARN("Gamma table enabling for TV not yet supported");
1095 return;
1096 }
1097
1098 REG_FLD_MOD(DISPC_CONFIG, enable, 9, 9);
1099}
1100
1101static void dispc_mgr_enable_cpr(enum omap_channel channel, bool enable) 1125static void dispc_mgr_enable_cpr(enum omap_channel channel, bool enable)
1102{ 1126{
1103 if (channel == OMAP_DSS_CHANNEL_DIGIT) 1127 if (channel == OMAP_DSS_CHANNEL_DIGIT)
@@ -3299,30 +3323,21 @@ static void dispc_mgr_get_lcd_divisor(enum omap_channel channel, int *lck_div,
3299 3323
3300static unsigned long dispc_fclk_rate(void) 3324static unsigned long dispc_fclk_rate(void)
3301{ 3325{
3302 struct dss_pll *pll; 3326 unsigned long r;
3303 unsigned long r = 0; 3327 enum dss_clk_source src;
3328
3329 src = dss_get_dispc_clk_source();
3304 3330
3305 switch (dss_get_dispc_clk_source()) { 3331 if (src == DSS_CLK_SRC_FCK) {
3306 case OMAP_DSS_CLK_SRC_FCK:
3307 r = dss_get_dispc_clk_rate(); 3332 r = dss_get_dispc_clk_rate();
3308 break; 3333 } else {
3309 case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: 3334 struct dss_pll *pll;
3310 pll = dss_pll_find("dsi0"); 3335 unsigned clkout_idx;
3311 if (!pll)
3312 pll = dss_pll_find("video0");
3313 3336
3314 r = pll->cinfo.clkout[0]; 3337 pll = dss_pll_find_by_src(src);
3315 break; 3338 clkout_idx = dss_pll_get_clkout_idx_for_src(src);
3316 case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
3317 pll = dss_pll_find("dsi1");
3318 if (!pll)
3319 pll = dss_pll_find("video1");
3320 3339
3321 r = pll->cinfo.clkout[0]; 3340 r = pll->cinfo.clkout[clkout_idx];
3322 break;
3323 default:
3324 BUG();
3325 return 0;
3326 } 3341 }
3327 3342
3328 return r; 3343 return r;
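Both dispc_fclk_rate() and dispc_mgr_lclk_rate() (next hunk) now share one pattern: a dss_clk_source value identifies not just a PLL but also which of its clkout dividers feeds DISPC, and two helpers replace the hard-coded per-source switch. As a sketch of the pattern, with signatures inferred from the call sites:

        struct dss_pll *pll = dss_pll_find_by_src(src);
        unsigned clkout_idx = dss_pll_get_clkout_idx_for_src(src);
        unsigned long rate  = pll->cinfo.clkout[clkout_idx];

This is what lets the same code cover additional PLL sources on newer SoCs without growing a switch statement per clock source.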
@@ -3330,43 +3345,31 @@ static unsigned long dispc_fclk_rate(void)
3330 3345
3331static unsigned long dispc_mgr_lclk_rate(enum omap_channel channel) 3346static unsigned long dispc_mgr_lclk_rate(enum omap_channel channel)
3332{ 3347{
3333 struct dss_pll *pll;
3334 int lcd; 3348 int lcd;
3335 unsigned long r; 3349 unsigned long r;
3336 u32 l; 3350 enum dss_clk_source src;
3337
3338 if (dss_mgr_is_lcd(channel)) {
3339 l = dispc_read_reg(DISPC_DIVISORo(channel));
3340 3351
3341 lcd = FLD_GET(l, 23, 16); 3352 /* for TV, LCLK rate is the FCLK rate */
3353 if (!dss_mgr_is_lcd(channel))
3354 return dispc_fclk_rate();
3342 3355
3343 switch (dss_get_lcd_clk_source(channel)) { 3356 src = dss_get_lcd_clk_source(channel);
3344 case OMAP_DSS_CLK_SRC_FCK:
3345 r = dss_get_dispc_clk_rate();
3346 break;
3347 case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
3348 pll = dss_pll_find("dsi0");
3349 if (!pll)
3350 pll = dss_pll_find("video0");
3351 3357
3352 r = pll->cinfo.clkout[0]; 3358 if (src == DSS_CLK_SRC_FCK) {
3353 break; 3359 r = dss_get_dispc_clk_rate();
3354 case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC: 3360 } else {
3355 pll = dss_pll_find("dsi1"); 3361 struct dss_pll *pll;
3356 if (!pll) 3362 unsigned clkout_idx;
3357 pll = dss_pll_find("video1");
3358 3363
3359 r = pll->cinfo.clkout[0]; 3364 pll = dss_pll_find_by_src(src);
3360 break; 3365 clkout_idx = dss_pll_get_clkout_idx_for_src(src);
3361 default:
3362 BUG();
3363 return 0;
3364 }
3365 3366
3366 return r / lcd; 3367 r = pll->cinfo.clkout[clkout_idx];
3367 } else {
3368 return dispc_fclk_rate();
3369 } 3368 }
3369
3370 lcd = REG_GET(DISPC_DIVISORo(channel), 23, 16);
3371
3372 return r / lcd;
3370} 3373}
3371 3374
3372static unsigned long dispc_mgr_pclk_rate(enum omap_channel channel) 3375static unsigned long dispc_mgr_pclk_rate(enum omap_channel channel)
@@ -3426,15 +3429,14 @@ static unsigned long dispc_plane_lclk_rate(enum omap_plane plane)
3426static void dispc_dump_clocks_channel(struct seq_file *s, enum omap_channel channel) 3429static void dispc_dump_clocks_channel(struct seq_file *s, enum omap_channel channel)
3427{ 3430{
3428 int lcd, pcd; 3431 int lcd, pcd;
3429 enum omap_dss_clk_source lcd_clk_src; 3432 enum dss_clk_source lcd_clk_src;
3430 3433
3431 seq_printf(s, "- %s -\n", mgr_desc[channel].name); 3434 seq_printf(s, "- %s -\n", mgr_desc[channel].name);
3432 3435
3433 lcd_clk_src = dss_get_lcd_clk_source(channel); 3436 lcd_clk_src = dss_get_lcd_clk_source(channel);
3434 3437
3435 seq_printf(s, "%s clk source = %s (%s)\n", mgr_desc[channel].name, 3438 seq_printf(s, "%s clk source = %s\n", mgr_desc[channel].name,
3436 dss_get_generic_clk_source_name(lcd_clk_src), 3439 dss_get_clk_source_name(lcd_clk_src));
3437 dss_feat_get_clk_source_name(lcd_clk_src));
3438 3440
3439 dispc_mgr_get_lcd_divisor(channel, &lcd, &pcd); 3441 dispc_mgr_get_lcd_divisor(channel, &lcd, &pcd);
3440 3442
@@ -3448,16 +3450,15 @@ void dispc_dump_clocks(struct seq_file *s)
3448{ 3450{
3449 int lcd; 3451 int lcd;
3450 u32 l; 3452 u32 l;
3451 enum omap_dss_clk_source dispc_clk_src = dss_get_dispc_clk_source(); 3453 enum dss_clk_source dispc_clk_src = dss_get_dispc_clk_source();
3452 3454
3453 if (dispc_runtime_get()) 3455 if (dispc_runtime_get())
3454 return; 3456 return;
3455 3457
3456 seq_printf(s, "- DISPC -\n"); 3458 seq_printf(s, "- DISPC -\n");
3457 3459
3458 seq_printf(s, "dispc fclk source = %s (%s)\n", 3460 seq_printf(s, "dispc fclk source = %s\n",
3459 dss_get_generic_clk_source_name(dispc_clk_src), 3461 dss_get_clk_source_name(dispc_clk_src));
3460 dss_feat_get_clk_source_name(dispc_clk_src));
3461 3462
3462 seq_printf(s, "fck\t\t%-16lu\n", dispc_fclk_rate()); 3463 seq_printf(s, "fck\t\t%-16lu\n", dispc_fclk_rate());
3463 3464
@@ -3814,6 +3815,139 @@ void dispc_disable_sidle(void)
3814 REG_FLD_MOD(DISPC_SYSCONFIG, 1, 4, 3); /* SIDLEMODE: no idle */ 3815 REG_FLD_MOD(DISPC_SYSCONFIG, 1, 4, 3); /* SIDLEMODE: no idle */
3815} 3816}
3816 3817
3818u32 dispc_mgr_gamma_size(enum omap_channel channel)
3819{
3820 const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
3821
3822 if (!dispc.feat->has_gamma_table)
3823 return 0;
3824
3825 return gdesc->len;
3826}
3827EXPORT_SYMBOL(dispc_mgr_gamma_size);
3828
3829static void dispc_mgr_write_gamma_table(enum omap_channel channel)
3830{
3831 const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
3832 u32 *table = dispc.gamma_table[channel];
3833 unsigned int i;
3834
3835 DSSDBG("%s: channel %d\n", __func__, channel);
3836
3837 for (i = 0; i < gdesc->len; ++i) {
3838 u32 v = table[i];
3839
3840 if (gdesc->has_index)
3841 v |= i << 24;
3842 else if (i == 0)
3843 v |= 1 << 31;
3844
3845 dispc_write_reg(gdesc->reg, v);
3846 }
3847}
3848
3849static void dispc_restore_gamma_tables(void)
3850{
3851 DSSDBG("%s()\n", __func__);
3852
3853 if (!dispc.feat->has_gamma_table)
3854 return;
3855
3856 dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_LCD);
3857
3858 dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_DIGIT);
3859
3860 if (dss_has_feature(FEAT_MGR_LCD2))
3861 dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_LCD2);
3862
3863 if (dss_has_feature(FEAT_MGR_LCD3))
3864 dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_LCD3);
3865}
3866
3867static const struct drm_color_lut dispc_mgr_gamma_default_lut[] = {
3868 { .red = 0, .green = 0, .blue = 0, },
3869 { .red = U16_MAX, .green = U16_MAX, .blue = U16_MAX, },
3870};
3871
3872void dispc_mgr_set_gamma(enum omap_channel channel,
3873 const struct drm_color_lut *lut,
3874 unsigned int length)
3875{
3876 const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
3877 u32 *table = dispc.gamma_table[channel];
3878 uint i;
3879
3880 DSSDBG("%s: channel %d, lut len %u, hw len %u\n", __func__,
3881 channel, length, gdesc->len);
3882
3883 if (!dispc.feat->has_gamma_table)
3884 return;
3885
3886 if (lut == NULL || length < 2) {
3887 lut = dispc_mgr_gamma_default_lut;
3888 length = ARRAY_SIZE(dispc_mgr_gamma_default_lut);
3889 }
3890
3891 for (i = 0; i < length - 1; ++i) {
3892 uint first = i * (gdesc->len - 1) / (length - 1);
3893 uint last = (i + 1) * (gdesc->len - 1) / (length - 1);
3894 uint w = last - first;
3895 u16 r, g, b;
3896 uint j;
3897
3898 if (w == 0)
3899 continue;
3900
3901 for (j = 0; j <= w; j++) {
3902 r = (lut[i].red * (w - j) + lut[i+1].red * j) / w;
3903 g = (lut[i].green * (w - j) + lut[i+1].green * j) / w;
3904 b = (lut[i].blue * (w - j) + lut[i+1].blue * j) / w;
3905
3906 r >>= 16 - gdesc->bits;
3907 g >>= 16 - gdesc->bits;
3908 b >>= 16 - gdesc->bits;
3909
3910 table[first + j] = (r << (gdesc->bits * 2)) |
3911 (g << gdesc->bits) | b;
3912 }
3913 }
3914
3915 if (dispc.is_enabled)
3916 dispc_mgr_write_gamma_table(channel);
3917}
3918EXPORT_SYMBOL(dispc_mgr_set_gamma);
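/*
 * Illustrative sketch, not part of the patch: the interpolation loop in
 * dispc_mgr_set_gamma() above, restated as a self-contained userspace
 * program. The 256-entry, 8-bits-per-component table layout is an
 * assumption chosen for the example; in the driver, gdesc->len and
 * gdesc->bits come from the per-manager gamma descriptor, and lut_entry
 * stands in for the color fields of struct drm_color_lut.
 */
#include <stdint.h>
#include <stdio.h>

#define HW_LEN  256U	/* assumed gdesc->len */
#define HW_BITS 8U	/* assumed gdesc->bits */

struct lut_entry { uint16_t red, green, blue; };

static uint32_t hw_table[HW_LEN];

static void expand_lut(const struct lut_entry *lut, unsigned int length)
{
	unsigned int i, j;

	for (i = 0; i < length - 1; i++) {
		/* map LUT segment i onto a slice of the hardware table */
		unsigned int first = i * (HW_LEN - 1) / (length - 1);
		unsigned int last = (i + 1) * (HW_LEN - 1) / (length - 1);
		unsigned int w = last - first;

		if (w == 0)
			continue;

		for (j = 0; j <= w; j++) {
			/* linear blend between the two segment endpoints */
			uint32_t r = (lut[i].red * (w - j) + lut[i + 1].red * j) / w;
			uint32_t g = (lut[i].green * (w - j) + lut[i + 1].green * j) / w;
			uint32_t b = (lut[i].blue * (w - j) + lut[i + 1].blue * j) / w;

			/* drop 16-bit components to HW precision, pack as R:G:B */
			r >>= 16 - HW_BITS;
			g >>= 16 - HW_BITS;
			b >>= 16 - HW_BITS;
			hw_table[first + j] = (r << (HW_BITS * 2)) |
					      (g << HW_BITS) | b;
		}
	}
}

int main(void)
{
	/* same identity ramp as dispc_mgr_gamma_default_lut above */
	const struct lut_entry lut[] = {
		{ .red = 0,      .green = 0,      .blue = 0      },
		{ .red = 0xffff, .green = 0xffff, .blue = 0xffff },
	};

	expand_lut(lut, 2);
	/* prints "000000 808080 ffffff": a linear ramp */
	printf("%06x %06x %06x\n", (unsigned)hw_table[0],
	       (unsigned)hw_table[128], (unsigned)hw_table[255]);
	return 0;
}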
3919
3920static int dispc_init_gamma_tables(void)
3921{
3922 int channel;
3923
3924 if (!dispc.feat->has_gamma_table)
3925 return 0;
3926
3927 for (channel = 0; channel < ARRAY_SIZE(dispc.gamma_table); channel++) {
3928 const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
3929 u32 *gt;
3930
3931 if (channel == OMAP_DSS_CHANNEL_LCD2 &&
3932 !dss_has_feature(FEAT_MGR_LCD2))
3933 continue;
3934
3935 if (channel == OMAP_DSS_CHANNEL_LCD3 &&
3936 !dss_has_feature(FEAT_MGR_LCD3))
3937 continue;
3938
3939 gt = devm_kmalloc_array(&dispc.pdev->dev, gdesc->len,
3940 sizeof(u32), GFP_KERNEL);
3941 if (!gt)
3942 return -ENOMEM;
3943
3944 dispc.gamma_table[channel] = gt;
3945
3946 dispc_mgr_set_gamma(channel, NULL, 0);
3947 }
3948 return 0;
3949}
3950
3817static void _omap_dispc_initial_config(void) 3951static void _omap_dispc_initial_config(void)
3818{ 3952{
3819 u32 l; 3953 u32 l;
@@ -3829,8 +3963,15 @@ static void _omap_dispc_initial_config(void)
3829 dispc.core_clk_rate = dispc_fclk_rate(); 3963 dispc.core_clk_rate = dispc_fclk_rate();
3830 } 3964 }
3831 3965
3832 /* FUNCGATED */ 3966 /* Use gamma table mode instead of palette mode */
3833 if (dss_has_feature(FEAT_FUNCGATED)) 3967 if (dispc.feat->has_gamma_table)
3968 REG_FLD_MOD(DISPC_CONFIG, 1, 3, 3);
3969
3970 /* For older DSS versions (FEAT_FUNCGATED) this enables
3971 * func-clock auto-gating. For newer versions
3972 * (dispc.feat->has_gamma_table) this enables tv-out gamma tables.
3973 */
3974 if (dss_has_feature(FEAT_FUNCGATED) || dispc.feat->has_gamma_table)
3834 REG_FLD_MOD(DISPC_CONFIG, 1, 9, 9); 3975 REG_FLD_MOD(DISPC_CONFIG, 1, 9, 9);
3835 3976
3836 dispc_setup_color_conv_coef(); 3977 dispc_setup_color_conv_coef();
@@ -3934,6 +4075,8 @@ static const struct dispc_features omap44xx_dispc_feats = {
3934 .has_writeback = true, 4075 .has_writeback = true,
3935 .supports_double_pixel = true, 4076 .supports_double_pixel = true,
3936 .reverse_ilace_field_order = true, 4077 .reverse_ilace_field_order = true,
4078 .has_gamma_table = true,
4079 .has_gamma_i734_bug = true,
3937}; 4080};
3938 4081
3939static const struct dispc_features omap54xx_dispc_feats = { 4082static const struct dispc_features omap54xx_dispc_feats = {
@@ -3959,6 +4102,8 @@ static const struct dispc_features omap54xx_dispc_feats = {
3959 .has_writeback = true, 4102 .has_writeback = true,
3960 .supports_double_pixel = true, 4103 .supports_double_pixel = true,
3961 .reverse_ilace_field_order = true, 4104 .reverse_ilace_field_order = true,
4105 .has_gamma_table = true,
4106 .has_gamma_i734_bug = true,
3962}; 4107};
3963 4108
3964static int dispc_init_features(struct platform_device *pdev) 4109static int dispc_init_features(struct platform_device *pdev)
@@ -4050,6 +4195,168 @@ void dispc_free_irq(void *dev_id)
4050} 4195}
4051EXPORT_SYMBOL(dispc_free_irq); 4196EXPORT_SYMBOL(dispc_free_irq);
4052 4197
4198/*
4199 * Workaround for errata i734 in DSS dispc
4200 * - LCD1 Gamma Correction Is Not Working When GFX Pipe Is Disabled
4201 *
4202 * For gamma tables to work on LCD1 the GFX plane has to be used at
4203 * least once after DSS HW has come out of reset. The workaround
4204 * sets up a minimal LCD configuration with the GFX plane and waits for one
4205 * vertical sync irq before disabling the setup and continuing with
4206 * the context restore. The physical outputs are gated during the
4207 * operation. This workaround requires that the gamma table's LOADMODE
4208 * is set to 0x2 in the DISPC_CONTROL1 register.
4209 *
4210 * For details see:
4211 * OMAP543x Multimedia Device Silicon Revision 2.0 Silicon Errata
4212 * Literature Number: SWPZ037E
4213 * Or some other relevant errata document for the DSS IP version.
4214 */
4215
4216static const struct dispc_errata_i734_data {
4217 struct omap_video_timings timings;
4218 struct omap_overlay_info ovli;
4219 struct omap_overlay_manager_info mgri;
4220 struct dss_lcd_mgr_config lcd_conf;
4221} i734 = {
4222 .timings = {
4223 .x_res = 8, .y_res = 1,
4224 .pixelclock = 16000000,
4225 .hsw = 8, .hfp = 4, .hbp = 4,
4226 .vsw = 1, .vfp = 1, .vbp = 1,
4227 .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
4228 .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
4229 .interlace = false,
4230 .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
4231 .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
4232 .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
4233 .double_pixel = false,
4234 },
4235 .ovli = {
4236 .screen_width = 1,
4237 .width = 1, .height = 1,
4238 .color_mode = OMAP_DSS_COLOR_RGB24U,
4239 .rotation = OMAP_DSS_ROT_0,
4240 .rotation_type = OMAP_DSS_ROT_DMA,
4241 .mirror = 0,
4242 .pos_x = 0, .pos_y = 0,
4243 .out_width = 0, .out_height = 0,
4244 .global_alpha = 0xff,
4245 .pre_mult_alpha = 0,
4246 .zorder = 0,
4247 },
4248 .mgri = {
4249 .default_color = 0,
4250 .trans_enabled = false,
4251 .partial_alpha_enabled = false,
4252 .cpr_enable = false,
4253 },
4254 .lcd_conf = {
4255 .io_pad_mode = DSS_IO_PAD_MODE_BYPASS,
4256 .stallmode = false,
4257 .fifohandcheck = false,
4258 .clock_info = {
4259 .lck_div = 1,
4260 .pck_div = 2,
4261 },
4262 .video_port_width = 24,
4263 .lcden_sig_polarity = 0,
4264 },
4265};
4266
4267static struct i734_buf {
4268 size_t size;
4269 dma_addr_t paddr;
4270 void *vaddr;
4271} i734_buf;
4272
4273static int dispc_errata_i734_wa_init(void)
4274{
4275 if (!dispc.feat->has_gamma_i734_bug)
4276 return 0;
4277
4278 i734_buf.size = i734.ovli.width * i734.ovli.height *
4279 color_mode_to_bpp(i734.ovli.color_mode) / 8;
4280
4281 i734_buf.vaddr = dma_alloc_writecombine(&dispc.pdev->dev, i734_buf.size,
4282 &i734_buf.paddr, GFP_KERNEL);
4283 if (!i734_buf.vaddr) {
4284 dev_err(&dispc.pdev->dev, "%s: dma_alloc_writecombine failed\n",
4285 __func__);
4286 return -ENOMEM;
4287 }
4288
4289 return 0;
4290}
4291
4292static void dispc_errata_i734_wa_fini(void)
4293{
4294 if (!dispc.feat->has_gamma_i734_bug)
4295 return;
4296
4297 dma_free_writecombine(&dispc.pdev->dev, i734_buf.size, i734_buf.vaddr,
4298 i734_buf.paddr);
4299}
4300
4301static void dispc_errata_i734_wa(void)
4302{
4303 u32 framedone_irq = dispc_mgr_get_framedone_irq(OMAP_DSS_CHANNEL_LCD);
4304 struct omap_overlay_info ovli;
4305 struct dss_lcd_mgr_config lcd_conf;
4306 u32 gatestate;
4307 unsigned int count;
4308
4309 if (!dispc.feat->has_gamma_i734_bug)
4310 return;
4311
4312 gatestate = REG_GET(DISPC_CONFIG, 8, 4);
4313
4314 ovli = i734.ovli;
4315 ovli.paddr = i734_buf.paddr;
4316 lcd_conf = i734.lcd_conf;
4317
4318 /* Gate all LCD1 outputs */
4319 REG_FLD_MOD(DISPC_CONFIG, 0x1f, 8, 4);
4320
4321 /* Setup and enable GFX plane */
4322 dispc_ovl_set_channel_out(OMAP_DSS_GFX, OMAP_DSS_CHANNEL_LCD);
4323 dispc_ovl_setup(OMAP_DSS_GFX, &ovli, false, &i734.timings, false);
4324 dispc_ovl_enable(OMAP_DSS_GFX, true);
4325
4326 /* Set up and enable display manager for LCD1 */
4327 dispc_mgr_setup(OMAP_DSS_CHANNEL_LCD, &i734.mgri);
4328 dispc_calc_clock_rates(dss_get_dispc_clk_rate(),
4329 &lcd_conf.clock_info);
4330 dispc_mgr_set_lcd_config(OMAP_DSS_CHANNEL_LCD, &lcd_conf);
4331 dispc_mgr_set_timings(OMAP_DSS_CHANNEL_LCD, &i734.timings);
4332
4333 dispc_clear_irqstatus(framedone_irq);
4334
4335 /* Enable and then disable the channel to produce just one frame */
4336 dispc_mgr_enable(OMAP_DSS_CHANNEL_LCD, true);
4337 dispc_mgr_enable(OMAP_DSS_CHANNEL_LCD, false);
4338
4339 /* Busy wait for framedone. We can't fiddle with irq handlers
4340 * in PM resume. Typically the loop runs fewer than five times and
4341 * waits less than a microsecond.
4342 */
4343 count = 0;
4344 while (!(dispc_read_irqstatus() & framedone_irq)) {
4345 if (count++ > 10000) {
4346 dev_err(&dispc.pdev->dev, "%s: framedone timeout\n",
4347 __func__);
4348 break;
4349 }
4350 }
4351 dispc_ovl_enable(OMAP_DSS_GFX, false);
4352
4353 /* Clear all irq bits before continuing */
4354 dispc_clear_irqstatus(0xffffffff);
4355
4356 /* Restore the original state to LCD1 output gates */
4357 REG_FLD_MOD(DISPC_CONFIG, gatestate, 8, 4);
4358}
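/*
 * How the workaround is wired into the driver (see the bind/unbind and
 * runtime-resume hunks below): dispc_errata_i734_wa_init() allocates the
 * one-pixel GFX buffer at bind time (4 bytes, assuming
 * color_mode_to_bpp(OMAP_DSS_COLOR_RGB24U) is 32), dispc_errata_i734_wa()
 * runs from dispc_runtime_resume() between _omap_dispc_initial_config()
 * and dispc_restore_context(), and dispc_errata_i734_wa_fini() frees the
 * buffer at unbind.
 */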
4359
4053/* DISPC HW IP initialisation */ 4360/* DISPC HW IP initialisation */
4054static int dispc_bind(struct device *dev, struct device *master, void *data) 4361static int dispc_bind(struct device *dev, struct device *master, void *data)
4055{ 4362{
@@ -4067,6 +4374,10 @@ static int dispc_bind(struct device *dev, struct device *master, void *data)
4067 if (r) 4374 if (r)
4068 return r; 4375 return r;
4069 4376
4377 r = dispc_errata_i734_wa_init();
4378 if (r)
4379 return r;
4380
4070 dispc_mem = platform_get_resource(dispc.pdev, IORESOURCE_MEM, 0); 4381 dispc_mem = platform_get_resource(dispc.pdev, IORESOURCE_MEM, 0);
4071 if (!dispc_mem) { 4382 if (!dispc_mem) {
4072 DSSERR("can't get IORESOURCE_MEM DISPC\n"); 4383 DSSERR("can't get IORESOURCE_MEM DISPC\n");
@@ -4100,6 +4411,10 @@ static int dispc_bind(struct device *dev, struct device *master, void *data)
4100 } 4411 }
4101 } 4412 }
4102 4413
4414 r = dispc_init_gamma_tables();
4415 if (r)
4416 return r;
4417
4103 pm_runtime_enable(&pdev->dev); 4418 pm_runtime_enable(&pdev->dev);
4104 4419
4105 r = dispc_runtime_get(); 4420 r = dispc_runtime_get();
@@ -4127,6 +4442,8 @@ static void dispc_unbind(struct device *dev, struct device *master,
4127 void *data) 4442 void *data)
4128{ 4443{
4129 pm_runtime_disable(dev); 4444 pm_runtime_disable(dev);
4445
4446 dispc_errata_i734_wa_fini();
4130} 4447}
4131 4448
4132static const struct component_ops dispc_component_ops = { 4449static const struct component_ops dispc_component_ops = {
@@ -4169,7 +4486,11 @@ static int dispc_runtime_resume(struct device *dev)
4169 if (REG_GET(DISPC_CONFIG, 2, 1) != OMAP_DSS_LOAD_FRAME_ONLY) { 4486 if (REG_GET(DISPC_CONFIG, 2, 1) != OMAP_DSS_LOAD_FRAME_ONLY) {
4170 _omap_dispc_initial_config(); 4487 _omap_dispc_initial_config();
4171 4488
4489 dispc_errata_i734_wa();
4490
4172 dispc_restore_context(); 4491 dispc_restore_context();
4492
4493 dispc_restore_gamma_tables();
4173 } 4494 }
4174 4495
4175 dispc.is_enabled = true; 4496 dispc.is_enabled = true;
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.h b/drivers/gpu/drm/omapdrm/dss/dispc.h
index 483744223dd1..bc1d8126ee87 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.h
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.h
@@ -42,6 +42,11 @@
42#define DISPC_MSTANDBY_CTRL 0x0858 42#define DISPC_MSTANDBY_CTRL 0x0858
43#define DISPC_GLOBAL_MFLAG_ATTRIBUTE 0x085C 43#define DISPC_GLOBAL_MFLAG_ATTRIBUTE 0x085C
44 44
45#define DISPC_GAMMA_TABLE0 0x0630
46#define DISPC_GAMMA_TABLE1 0x0634
47#define DISPC_GAMMA_TABLE2 0x0638
48#define DISPC_GAMMA_TABLE3 0x0850
49
45/* DISPC overlay registers */ 50/* DISPC overlay registers */
46#define DISPC_OVL_BA0(n) (DISPC_OVL_BASE(n) + \ 51#define DISPC_OVL_BA0(n) (DISPC_OVL_BASE(n) + \
47 DISPC_BA0_OFFSET(n)) 52 DISPC_BA0_OFFSET(n))
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c b/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c
index 038c15b04215..34fad2376f8d 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c
@@ -18,8 +18,8 @@
18 */ 18 */
19 19
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <video/omapdss.h>
22 21
22#include "omapdss.h"
23#include "dispc.h" 23#include "dispc.h"
24 24
25static const struct dispc_coef coef3_M8[8] = { 25static const struct dispc_coef coef3_M8[8] = {
diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c
index 9f3dd09b0a6c..8dcdd7cf9937 100644
--- a/drivers/gpu/drm/omapdrm/dss/display.c
+++ b/drivers/gpu/drm/omapdrm/dss/display.c
@@ -28,7 +28,7 @@
28#include <linux/platform_device.h> 28#include <linux/platform_device.h>
29#include <linux/of.h> 29#include <linux/of.h>
30 30
31#include <video/omapdss.h> 31#include "omapdss.h"
32#include "dss.h" 32#include "dss.h"
33#include "dss_features.h" 33#include "dss_features.h"
34 34
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index 97ea60257884..b268295b76cf 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -34,17 +34,15 @@
34#include <linux/clk.h> 34#include <linux/clk.h>
35#include <linux/component.h> 35#include <linux/component.h>
36 36
37#include <video/omapdss.h> 37#include "omapdss.h"
38
39#include "dss.h" 38#include "dss.h"
40#include "dss_features.h" 39#include "dss_features.h"
41 40
42#define HSDIV_DISPC 0
43
44struct dpi_data { 41struct dpi_data {
45 struct platform_device *pdev; 42 struct platform_device *pdev;
46 43
47 struct regulator *vdds_dsi_reg; 44 struct regulator *vdds_dsi_reg;
45 enum dss_clk_source clk_src;
48 struct dss_pll *pll; 46 struct dss_pll *pll;
49 47
50 struct mutex lock; 48 struct mutex lock;
@@ -69,7 +67,7 @@ static struct dpi_data *dpi_get_data_from_pdev(struct platform_device *pdev)
69 return dev_get_drvdata(&pdev->dev); 67 return dev_get_drvdata(&pdev->dev);
70} 68}
71 69
72static struct dss_pll *dpi_get_pll(enum omap_channel channel) 70static enum dss_clk_source dpi_get_clk_src(enum omap_channel channel)
73{ 71{
74 /* 72 /*
75 * XXX we can't currently use DSI PLL for DPI with OMAP3, as the DSI PLL 73 * XXX we can't currently use DSI PLL for DPI with OMAP3, as the DSI PLL
@@ -83,64 +81,51 @@ static struct dss_pll *dpi_get_pll(enum omap_channel channel)
83 case OMAPDSS_VER_OMAP3630: 81 case OMAPDSS_VER_OMAP3630:
84 case OMAPDSS_VER_AM35xx: 82 case OMAPDSS_VER_AM35xx:
85 case OMAPDSS_VER_AM43xx: 83 case OMAPDSS_VER_AM43xx:
86 return NULL; 84 return DSS_CLK_SRC_FCK;
87 85
88 case OMAPDSS_VER_OMAP4430_ES1: 86 case OMAPDSS_VER_OMAP4430_ES1:
89 case OMAPDSS_VER_OMAP4430_ES2: 87 case OMAPDSS_VER_OMAP4430_ES2:
90 case OMAPDSS_VER_OMAP4: 88 case OMAPDSS_VER_OMAP4:
91 switch (channel) { 89 switch (channel) {
92 case OMAP_DSS_CHANNEL_LCD: 90 case OMAP_DSS_CHANNEL_LCD:
93 return dss_pll_find("dsi0"); 91 return DSS_CLK_SRC_PLL1_1;
94 case OMAP_DSS_CHANNEL_LCD2: 92 case OMAP_DSS_CHANNEL_LCD2:
95 return dss_pll_find("dsi1"); 93 return DSS_CLK_SRC_PLL2_1;
96 default: 94 default:
97 return NULL; 95 return DSS_CLK_SRC_FCK;
98 } 96 }
99 97
100 case OMAPDSS_VER_OMAP5: 98 case OMAPDSS_VER_OMAP5:
101 switch (channel) { 99 switch (channel) {
102 case OMAP_DSS_CHANNEL_LCD: 100 case OMAP_DSS_CHANNEL_LCD:
103 return dss_pll_find("dsi0"); 101 return DSS_CLK_SRC_PLL1_1;
104 case OMAP_DSS_CHANNEL_LCD3: 102 case OMAP_DSS_CHANNEL_LCD3:
105 return dss_pll_find("dsi1"); 103 return DSS_CLK_SRC_PLL2_1;
104 case OMAP_DSS_CHANNEL_LCD2:
106 default: 105 default:
107 return NULL; 106 return DSS_CLK_SRC_FCK;
108 } 107 }
109 108
110 case OMAPDSS_VER_DRA7xx: 109 case OMAPDSS_VER_DRA7xx:
111 switch (channel) { 110 switch (channel) {
112 case OMAP_DSS_CHANNEL_LCD: 111 case OMAP_DSS_CHANNEL_LCD:
112 return DSS_CLK_SRC_PLL1_1;
113 case OMAP_DSS_CHANNEL_LCD2: 113 case OMAP_DSS_CHANNEL_LCD2:
114 return dss_pll_find("video0"); 114 return DSS_CLK_SRC_PLL1_3;
115 case OMAP_DSS_CHANNEL_LCD3: 115 case OMAP_DSS_CHANNEL_LCD3:
116 return dss_pll_find("video1"); 116 return DSS_CLK_SRC_PLL2_1;
117 default: 117 default:
118 return NULL; 118 return DSS_CLK_SRC_FCK;
119 } 119 }
120 120
121 default: 121 default:
122 return NULL; 122 return DSS_CLK_SRC_FCK;
123 }
124}
125
126static enum omap_dss_clk_source dpi_get_alt_clk_src(enum omap_channel channel)
127{
128 switch (channel) {
129 case OMAP_DSS_CHANNEL_LCD:
130 return OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC;
131 case OMAP_DSS_CHANNEL_LCD2:
132 return OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC;
133 case OMAP_DSS_CHANNEL_LCD3:
134 return OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC;
135 default:
136 /* this shouldn't happen */
137 WARN_ON(1);
138 return OMAP_DSS_CLK_SRC_FCK;
139 } 123 }
140} 124}
141 125
142struct dpi_clk_calc_ctx { 126struct dpi_clk_calc_ctx {
143 struct dss_pll *pll; 127 struct dss_pll *pll;
128 unsigned clkout_idx;
144 129
145 /* inputs */ 130 /* inputs */
146 131
@@ -148,7 +133,7 @@ struct dpi_clk_calc_ctx {
148 133
149 /* outputs */ 134 /* outputs */
150 135
151 struct dss_pll_clock_info dsi_cinfo; 136 struct dss_pll_clock_info pll_cinfo;
152 unsigned long fck; 137 unsigned long fck;
153 struct dispc_clock_info dispc_cinfo; 138 struct dispc_clock_info dispc_cinfo;
154}; 139};
@@ -193,8 +178,8 @@ static bool dpi_calc_hsdiv_cb(int m_dispc, unsigned long dispc,
193 if (m_dispc > 1 && m_dispc % 2 != 0 && ctx->pck_min >= 100000000) 178 if (m_dispc > 1 && m_dispc % 2 != 0 && ctx->pck_min >= 100000000)
194 return false; 179 return false;
195 180
196 ctx->dsi_cinfo.mX[HSDIV_DISPC] = m_dispc; 181 ctx->pll_cinfo.mX[ctx->clkout_idx] = m_dispc;
197 ctx->dsi_cinfo.clkout[HSDIV_DISPC] = dispc; 182 ctx->pll_cinfo.clkout[ctx->clkout_idx] = dispc;
198 183
199 return dispc_div_calc(dispc, ctx->pck_min, ctx->pck_max, 184 return dispc_div_calc(dispc, ctx->pck_min, ctx->pck_max,
200 dpi_calc_dispc_cb, ctx); 185 dpi_calc_dispc_cb, ctx);
@@ -207,12 +192,12 @@ static bool dpi_calc_pll_cb(int n, int m, unsigned long fint,
207{ 192{
208 struct dpi_clk_calc_ctx *ctx = data; 193 struct dpi_clk_calc_ctx *ctx = data;
209 194
210 ctx->dsi_cinfo.n = n; 195 ctx->pll_cinfo.n = n;
211 ctx->dsi_cinfo.m = m; 196 ctx->pll_cinfo.m = m;
212 ctx->dsi_cinfo.fint = fint; 197 ctx->pll_cinfo.fint = fint;
213 ctx->dsi_cinfo.clkdco = clkdco; 198 ctx->pll_cinfo.clkdco = clkdco;
214 199
215 return dss_pll_hsdiv_calc(ctx->pll, clkdco, 200 return dss_pll_hsdiv_calc_a(ctx->pll, clkdco,
216 ctx->pck_min, dss_feat_get_param_max(FEAT_PARAM_DSS_FCK), 201 ctx->pck_min, dss_feat_get_param_max(FEAT_PARAM_DSS_FCK),
217 dpi_calc_hsdiv_cb, ctx); 202 dpi_calc_hsdiv_cb, ctx);
218} 203}
@@ -227,25 +212,39 @@ static bool dpi_calc_dss_cb(unsigned long fck, void *data)
227 dpi_calc_dispc_cb, ctx); 212 dpi_calc_dispc_cb, ctx);
228} 213}
229 214
230static bool dpi_dsi_clk_calc(struct dpi_data *dpi, unsigned long pck, 215static bool dpi_pll_clk_calc(struct dpi_data *dpi, unsigned long pck,
231 struct dpi_clk_calc_ctx *ctx) 216 struct dpi_clk_calc_ctx *ctx)
232{ 217{
233 unsigned long clkin; 218 unsigned long clkin;
234 unsigned long pll_min, pll_max;
235 219
236 memset(ctx, 0, sizeof(*ctx)); 220 memset(ctx, 0, sizeof(*ctx));
237 ctx->pll = dpi->pll; 221 ctx->pll = dpi->pll;
238 ctx->pck_min = pck - 1000; 222 ctx->clkout_idx = dss_pll_get_clkout_idx_for_src(dpi->clk_src);
239 ctx->pck_max = pck + 1000;
240 223
241 pll_min = 0; 224 clkin = clk_get_rate(dpi->pll->clkin);
242 pll_max = 0;
243 225
244 clkin = clk_get_rate(ctx->pll->clkin); 226 if (dpi->pll->hw->type == DSS_PLL_TYPE_A) {
227 unsigned long pll_min, pll_max;
245 228
246 return dss_pll_calc(ctx->pll, clkin, 229 ctx->pck_min = pck - 1000;
247 pll_min, pll_max, 230 ctx->pck_max = pck + 1000;
248 dpi_calc_pll_cb, ctx); 231
232 pll_min = 0;
233 pll_max = 0;
234
235 return dss_pll_calc_a(ctx->pll, clkin,
236 pll_min, pll_max,
237 dpi_calc_pll_cb, ctx);
238 } else { /* DSS_PLL_TYPE_B */
239 dss_pll_calc_b(dpi->pll, clkin, pck, &ctx->pll_cinfo);
240
241 ctx->dispc_cinfo.lck_div = 1;
242 ctx->dispc_cinfo.pck_div = 1;
243 ctx->dispc_cinfo.lck = ctx->pll_cinfo.clkout[0];
244 ctx->dispc_cinfo.pck = ctx->dispc_cinfo.lck;
245
246 return true;
247 }
249} 248}
250 249
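/*
 * Design note on the branch above, inferred from the dss.h hunk later in
 * this diff ("Type-B PLLs: clkout[0] refers to m2"): a Type-B PLL has a
 * single output divider, so there is no hsdiv search to run.
 * dss_pll_calc_b() is handed the target pixel clock directly and the
 * DISPC divisors are pinned to 1:1, while the Type-A path keeps the
 * iterative dss_pll_calc_a()/dss_pll_hsdiv_calc_a() search.
 */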
251static bool dpi_dss_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx) 250static bool dpi_dss_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx)
@@ -279,7 +278,7 @@ static bool dpi_dss_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx)
279 278
280 279
281 280
282static int dpi_set_dsi_clk(struct dpi_data *dpi, enum omap_channel channel, 281static int dpi_set_pll_clk(struct dpi_data *dpi, enum omap_channel channel,
283 unsigned long pck_req, unsigned long *fck, int *lck_div, 282 unsigned long pck_req, unsigned long *fck, int *lck_div,
284 int *pck_div) 283 int *pck_div)
285{ 284{
@@ -287,20 +286,19 @@ static int dpi_set_dsi_clk(struct dpi_data *dpi, enum omap_channel channel,
287 int r; 286 int r;
288 bool ok; 287 bool ok;
289 288
290 ok = dpi_dsi_clk_calc(dpi, pck_req, &ctx); 289 ok = dpi_pll_clk_calc(dpi, pck_req, &ctx);
291 if (!ok) 290 if (!ok)
292 return -EINVAL; 291 return -EINVAL;
293 292
294 r = dss_pll_set_config(dpi->pll, &ctx.dsi_cinfo); 293 r = dss_pll_set_config(dpi->pll, &ctx.pll_cinfo);
295 if (r) 294 if (r)
296 return r; 295 return r;
297 296
298 dss_select_lcd_clk_source(channel, 297 dss_select_lcd_clk_source(channel, dpi->clk_src);
299 dpi_get_alt_clk_src(channel));
300 298
301 dpi->mgr_config.clock_info = ctx.dispc_cinfo; 299 dpi->mgr_config.clock_info = ctx.dispc_cinfo;
302 300
303 *fck = ctx.dsi_cinfo.clkout[HSDIV_DISPC]; 301 *fck = ctx.pll_cinfo.clkout[ctx.clkout_idx];
304 *lck_div = ctx.dispc_cinfo.lck_div; 302 *lck_div = ctx.dispc_cinfo.lck_div;
305 *pck_div = ctx.dispc_cinfo.pck_div; 303 *pck_div = ctx.dispc_cinfo.pck_div;
306 304
@@ -342,7 +340,7 @@ static int dpi_set_mode(struct dpi_data *dpi)
342 int r = 0; 340 int r = 0;
343 341
344 if (dpi->pll) 342 if (dpi->pll)
345 r = dpi_set_dsi_clk(dpi, channel, t->pixelclock, &fck, 343 r = dpi_set_pll_clk(dpi, channel, t->pixelclock, &fck,
346 &lck_div, &pck_div); 344 &lck_div, &pck_div);
347 else 345 else
348 r = dpi_set_dispc_clk(dpi, t->pixelclock, &fck, 346 r = dpi_set_dispc_clk(dpi, t->pixelclock, &fck,
@@ -419,7 +417,7 @@ static int dpi_display_enable(struct omap_dss_device *dssdev)
419 if (dpi->pll) { 417 if (dpi->pll) {
420 r = dss_pll_enable(dpi->pll); 418 r = dss_pll_enable(dpi->pll);
421 if (r) 419 if (r)
422 goto err_dsi_pll_init; 420 goto err_pll_init;
423 } 421 }
424 422
425 r = dpi_set_mode(dpi); 423 r = dpi_set_mode(dpi);
@@ -442,7 +440,7 @@ err_mgr_enable:
442err_set_mode: 440err_set_mode:
443 if (dpi->pll) 441 if (dpi->pll)
444 dss_pll_disable(dpi->pll); 442 dss_pll_disable(dpi->pll);
445err_dsi_pll_init: 443err_pll_init:
446err_src_sel: 444err_src_sel:
447 dispc_runtime_put(); 445 dispc_runtime_put();
448err_get_dispc: 446err_get_dispc:
@@ -465,7 +463,7 @@ static void dpi_display_disable(struct omap_dss_device *dssdev)
465 dss_mgr_disable(channel); 463 dss_mgr_disable(channel);
466 464
467 if (dpi->pll) { 465 if (dpi->pll) {
468 dss_select_lcd_clk_source(channel, OMAP_DSS_CLK_SRC_FCK); 466 dss_select_lcd_clk_source(channel, DSS_CLK_SRC_FCK);
469 dss_pll_disable(dpi->pll); 467 dss_pll_disable(dpi->pll);
470 } 468 }
471 469
@@ -524,11 +522,11 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
524 return -EINVAL; 522 return -EINVAL;
525 523
526 if (dpi->pll) { 524 if (dpi->pll) {
527 ok = dpi_dsi_clk_calc(dpi, timings->pixelclock, &ctx); 525 ok = dpi_pll_clk_calc(dpi, timings->pixelclock, &ctx);
528 if (!ok) 526 if (!ok)
529 return -EINVAL; 527 return -EINVAL;
530 528
531 fck = ctx.dsi_cinfo.clkout[HSDIV_DISPC]; 529 fck = ctx.pll_cinfo.clkout[ctx.clkout_idx];
532 } else { 530 } else {
533 ok = dpi_dss_clk_calc(timings->pixelclock, &ctx); 531 ok = dpi_dss_clk_calc(timings->pixelclock, &ctx);
534 if (!ok) 532 if (!ok)
@@ -558,7 +556,7 @@ static void dpi_set_data_lines(struct omap_dss_device *dssdev, int data_lines)
558 mutex_unlock(&dpi->lock); 556 mutex_unlock(&dpi->lock);
559} 557}
560 558
561static int dpi_verify_dsi_pll(struct dss_pll *pll) 559static int dpi_verify_pll(struct dss_pll *pll)
562{ 560{
563 int r; 561 int r;
564 562
@@ -602,16 +600,14 @@ static void dpi_init_pll(struct dpi_data *dpi)
602 if (dpi->pll) 600 if (dpi->pll)
603 return; 601 return;
604 602
605 pll = dpi_get_pll(dpi->output.dispc_channel); 603 dpi->clk_src = dpi_get_clk_src(dpi->output.dispc_channel);
604
605 pll = dss_pll_find_by_src(dpi->clk_src);
606 if (!pll) 606 if (!pll)
607 return; 607 return;
608 608
609 /* On DRA7 we need to set a mux to use the PLL */ 609 if (dpi_verify_pll(pll)) {
610 if (omapdss_get_version() == OMAPDSS_VER_DRA7xx) 610 DSSWARN("PLL not operational\n");
611 dss_ctrl_pll_set_control_mux(pll->id, dpi->output.dispc_channel);
612
613 if (dpi_verify_dsi_pll(pll)) {
614 DSSWARN("DSI PLL not operational\n");
615 return; 611 return;
616 } 612 }
617 613
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 8730646a0cbb..e1be5e795cd8 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -42,9 +42,9 @@
42#include <linux/of_platform.h> 42#include <linux/of_platform.h>
43#include <linux/component.h> 43#include <linux/component.h>
44 44
45#include <video/omapdss.h>
46#include <video/mipi_display.h> 45#include <video/mipi_display.h>
47 46
47#include "omapdss.h"
48#include "dss.h" 48#include "dss.h"
49#include "dss_features.h" 49#include "dss_features.h"
50 50
@@ -1167,7 +1167,6 @@ static int dsi_regulator_init(struct platform_device *dsidev)
1167{ 1167{
1168 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 1168 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1169 struct regulator *vdds_dsi; 1169 struct regulator *vdds_dsi;
1170 int r;
1171 1170
1172 if (dsi->vdds_dsi_reg != NULL) 1171 if (dsi->vdds_dsi_reg != NULL)
1173 return 0; 1172 return 0;
@@ -1180,15 +1179,6 @@ static int dsi_regulator_init(struct platform_device *dsidev)
1180 return PTR_ERR(vdds_dsi); 1179 return PTR_ERR(vdds_dsi);
1181 } 1180 }
1182 1181
1183 if (regulator_can_change_voltage(vdds_dsi)) {
1184 r = regulator_set_voltage(vdds_dsi, 1800000, 1800000);
1185 if (r) {
1186 devm_regulator_put(vdds_dsi);
1187 DSSERR("can't set the DSI regulator voltage\n");
1188 return r;
1189 }
1190 }
1191
1192 dsi->vdds_dsi_reg = vdds_dsi; 1182 dsi->vdds_dsi_reg = vdds_dsi;
1193 1183
1194 return 0; 1184 return 0;
@@ -1271,7 +1261,7 @@ static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
1271 unsigned long r; 1261 unsigned long r;
1272 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 1262 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1273 1263
1274 if (dss_get_dsi_clk_source(dsi->module_id) == OMAP_DSS_CLK_SRC_FCK) { 1264 if (dss_get_dsi_clk_source(dsi->module_id) == DSS_CLK_SRC_FCK) {
1275 /* DSI FCLK source is DSS_CLK_FCK */ 1265 /* DSI FCLK source is DSS_CLK_FCK */
1276 r = clk_get_rate(dsi->dss_clk); 1266 r = clk_get_rate(dsi->dss_clk);
1277 } else { 1267 } else {
@@ -1484,7 +1474,7 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
1484{ 1474{
1485 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 1475 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1486 struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo; 1476 struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo;
1487 enum omap_dss_clk_source dispc_clk_src, dsi_clk_src; 1477 enum dss_clk_source dispc_clk_src, dsi_clk_src;
1488 int dsi_module = dsi->module_id; 1478 int dsi_module = dsi->module_id;
1489 struct dss_pll *pll = &dsi->pll; 1479 struct dss_pll *pll = &dsi->pll;
1490 1480
@@ -1504,28 +1494,27 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
1504 cinfo->clkdco, cinfo->m); 1494 cinfo->clkdco, cinfo->m);
1505 1495
1506 seq_printf(s, "DSI_PLL_HSDIV_DISPC (%s)\t%-16lum_dispc %u\t(%s)\n", 1496 seq_printf(s, "DSI_PLL_HSDIV_DISPC (%s)\t%-16lum_dispc %u\t(%s)\n",
1507 dss_feat_get_clk_source_name(dsi_module == 0 ? 1497 dss_get_clk_source_name(dsi_module == 0 ?
1508 OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC : 1498 DSS_CLK_SRC_PLL1_1 :
1509 OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC), 1499 DSS_CLK_SRC_PLL2_1),
1510 cinfo->clkout[HSDIV_DISPC], 1500 cinfo->clkout[HSDIV_DISPC],
1511 cinfo->mX[HSDIV_DISPC], 1501 cinfo->mX[HSDIV_DISPC],
1512 dispc_clk_src == OMAP_DSS_CLK_SRC_FCK ? 1502 dispc_clk_src == DSS_CLK_SRC_FCK ?
1513 "off" : "on"); 1503 "off" : "on");
1514 1504
1515 seq_printf(s, "DSI_PLL_HSDIV_DSI (%s)\t%-16lum_dsi %u\t(%s)\n", 1505 seq_printf(s, "DSI_PLL_HSDIV_DSI (%s)\t%-16lum_dsi %u\t(%s)\n",
1516 dss_feat_get_clk_source_name(dsi_module == 0 ? 1506 dss_get_clk_source_name(dsi_module == 0 ?
1517 OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI : 1507 DSS_CLK_SRC_PLL1_2 :
1518 OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI), 1508 DSS_CLK_SRC_PLL2_2),
1519 cinfo->clkout[HSDIV_DSI], 1509 cinfo->clkout[HSDIV_DSI],
1520 cinfo->mX[HSDIV_DSI], 1510 cinfo->mX[HSDIV_DSI],
1521 dsi_clk_src == OMAP_DSS_CLK_SRC_FCK ? 1511 dsi_clk_src == DSS_CLK_SRC_FCK ?
1522 "off" : "on"); 1512 "off" : "on");
1523 1513
1524 seq_printf(s, "- DSI%d -\n", dsi_module + 1); 1514 seq_printf(s, "- DSI%d -\n", dsi_module + 1);
1525 1515
1526 seq_printf(s, "dsi fclk source = %s (%s)\n", 1516 seq_printf(s, "dsi fclk source = %s\n",
1527 dss_get_generic_clk_source_name(dsi_clk_src), 1517 dss_get_clk_source_name(dsi_clk_src));
1528 dss_feat_get_clk_source_name(dsi_clk_src));
1529 1518
1530 seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate(dsidev)); 1519 seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate(dsidev));
1531 1520
@@ -4111,8 +4100,8 @@ static int dsi_display_init_dispc(struct platform_device *dsidev,
4111 int r; 4100 int r;
4112 4101
4113 dss_select_lcd_clk_source(channel, dsi->module_id == 0 ? 4102 dss_select_lcd_clk_source(channel, dsi->module_id == 0 ?
4114 OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC : 4103 DSS_CLK_SRC_PLL1_1 :
4115 OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC); 4104 DSS_CLK_SRC_PLL2_1);
4116 4105
4117 if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) { 4106 if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) {
4118 r = dss_mgr_register_framedone_handler(channel, 4107 r = dss_mgr_register_framedone_handler(channel,
@@ -4159,7 +4148,7 @@ err1:
4159 dss_mgr_unregister_framedone_handler(channel, 4148 dss_mgr_unregister_framedone_handler(channel,
4160 dsi_framedone_irq_callback, dsidev); 4149 dsi_framedone_irq_callback, dsidev);
4161err: 4150err:
4162 dss_select_lcd_clk_source(channel, OMAP_DSS_CLK_SRC_FCK); 4151 dss_select_lcd_clk_source(channel, DSS_CLK_SRC_FCK);
4163 return r; 4152 return r;
4164} 4153}
4165 4154
@@ -4172,7 +4161,7 @@ static void dsi_display_uninit_dispc(struct platform_device *dsidev,
4172 dss_mgr_unregister_framedone_handler(channel, 4161 dss_mgr_unregister_framedone_handler(channel,
4173 dsi_framedone_irq_callback, dsidev); 4162 dsi_framedone_irq_callback, dsidev);
4174 4163
4175 dss_select_lcd_clk_source(channel, OMAP_DSS_CLK_SRC_FCK); 4164 dss_select_lcd_clk_source(channel, DSS_CLK_SRC_FCK);
4176} 4165}
4177 4166
4178static int dsi_configure_dsi_clocks(struct platform_device *dsidev) 4167static int dsi_configure_dsi_clocks(struct platform_device *dsidev)
@@ -4206,8 +4195,8 @@ static int dsi_display_init_dsi(struct platform_device *dsidev)
4206 goto err1; 4195 goto err1;
4207 4196
4208 dss_select_dsi_clk_source(dsi->module_id, dsi->module_id == 0 ? 4197 dss_select_dsi_clk_source(dsi->module_id, dsi->module_id == 0 ?
4209 OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI : 4198 DSS_CLK_SRC_PLL1_2 :
4210 OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI); 4199 DSS_CLK_SRC_PLL2_2);
4211 4200
4212 DSSDBG("PLL OK\n"); 4201 DSSDBG("PLL OK\n");
4213 4202
@@ -4239,7 +4228,7 @@ static int dsi_display_init_dsi(struct platform_device *dsidev)
4239err3: 4228err3:
4240 dsi_cio_uninit(dsidev); 4229 dsi_cio_uninit(dsidev);
4241err2: 4230err2:
4242 dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK); 4231 dss_select_dsi_clk_source(dsi->module_id, DSS_CLK_SRC_FCK);
4243err1: 4232err1:
4244 dss_pll_disable(&dsi->pll); 4233 dss_pll_disable(&dsi->pll);
4245err0: 4234err0:
@@ -4261,7 +4250,7 @@ static void dsi_display_uninit_dsi(struct platform_device *dsidev,
4261 dsi_vc_enable(dsidev, 2, 0); 4250 dsi_vc_enable(dsidev, 2, 0);
4262 dsi_vc_enable(dsidev, 3, 0); 4251 dsi_vc_enable(dsidev, 3, 0);
4263 4252
4264 dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK); 4253 dss_select_dsi_clk_source(dsi->module_id, DSS_CLK_SRC_FCK);
4265 dsi_cio_uninit(dsidev); 4254 dsi_cio_uninit(dsidev);
4266 dsi_pll_uninit(dsidev, disconnect_lanes); 4255 dsi_pll_uninit(dsidev, disconnect_lanes);
4267} 4256}
@@ -4462,7 +4451,7 @@ static bool dsi_cm_calc_pll_cb(int n, int m, unsigned long fint,
4462 ctx->dsi_cinfo.fint = fint; 4451 ctx->dsi_cinfo.fint = fint;
4463 ctx->dsi_cinfo.clkdco = clkdco; 4452 ctx->dsi_cinfo.clkdco = clkdco;
4464 4453
4465 return dss_pll_hsdiv_calc(ctx->pll, clkdco, ctx->req_pck_min, 4454 return dss_pll_hsdiv_calc_a(ctx->pll, clkdco, ctx->req_pck_min,
4466 dss_feat_get_param_max(FEAT_PARAM_DSS_FCK), 4455 dss_feat_get_param_max(FEAT_PARAM_DSS_FCK),
4467 dsi_cm_calc_hsdiv_cb, ctx); 4456 dsi_cm_calc_hsdiv_cb, ctx);
4468} 4457}
@@ -4501,7 +4490,7 @@ static bool dsi_cm_calc(struct dsi_data *dsi,
4501 pll_min = max(cfg->hs_clk_min * 4, txbyteclk * 4 * 4); 4490 pll_min = max(cfg->hs_clk_min * 4, txbyteclk * 4 * 4);
4502 pll_max = cfg->hs_clk_max * 4; 4491 pll_max = cfg->hs_clk_max * 4;
4503 4492
4504 return dss_pll_calc(ctx->pll, clkin, 4493 return dss_pll_calc_a(ctx->pll, clkin,
4505 pll_min, pll_max, 4494 pll_min, pll_max,
4506 dsi_cm_calc_pll_cb, ctx); 4495 dsi_cm_calc_pll_cb, ctx);
4507} 4496}
@@ -4760,7 +4749,7 @@ static bool dsi_vm_calc_pll_cb(int n, int m, unsigned long fint,
4760 ctx->dsi_cinfo.fint = fint; 4749 ctx->dsi_cinfo.fint = fint;
4761 ctx->dsi_cinfo.clkdco = clkdco; 4750 ctx->dsi_cinfo.clkdco = clkdco;
4762 4751
4763 return dss_pll_hsdiv_calc(ctx->pll, clkdco, ctx->req_pck_min, 4752 return dss_pll_hsdiv_calc_a(ctx->pll, clkdco, ctx->req_pck_min,
4764 dss_feat_get_param_max(FEAT_PARAM_DSS_FCK), 4753 dss_feat_get_param_max(FEAT_PARAM_DSS_FCK),
4765 dsi_vm_calc_hsdiv_cb, ctx); 4754 dsi_vm_calc_hsdiv_cb, ctx);
4766} 4755}
@@ -4802,7 +4791,7 @@ static bool dsi_vm_calc(struct dsi_data *dsi,
4802 pll_max = byteclk_max * 4 * 4; 4791 pll_max = byteclk_max * 4 * 4;
4803 } 4792 }
4804 4793
4805 return dss_pll_calc(ctx->pll, clkin, 4794 return dss_pll_calc_a(ctx->pll, clkin,
4806 pll_min, pll_max, 4795 pll_min, pll_max,
4807 dsi_vm_calc_pll_cb, ctx); 4796 dsi_vm_calc_pll_cb, ctx);
4808} 4797}
@@ -5148,6 +5137,8 @@ static const struct dss_pll_ops dsi_pll_ops = {
5148}; 5137};
5149 5138
5150static const struct dss_pll_hw dss_omap3_dsi_pll_hw = { 5139static const struct dss_pll_hw dss_omap3_dsi_pll_hw = {
5140 .type = DSS_PLL_TYPE_A,
5141
5151 .n_max = (1 << 7) - 1, 5142 .n_max = (1 << 7) - 1,
5152 .m_max = (1 << 11) - 1, 5143 .m_max = (1 << 11) - 1,
5153 .mX_max = (1 << 4) - 1, 5144 .mX_max = (1 << 4) - 1,
@@ -5173,6 +5164,8 @@ static const struct dss_pll_hw dss_omap3_dsi_pll_hw = {
5173}; 5164};
5174 5165
5175static const struct dss_pll_hw dss_omap4_dsi_pll_hw = { 5166static const struct dss_pll_hw dss_omap4_dsi_pll_hw = {
5167 .type = DSS_PLL_TYPE_A,
5168
5176 .n_max = (1 << 8) - 1, 5169 .n_max = (1 << 8) - 1,
5177 .m_max = (1 << 12) - 1, 5170 .m_max = (1 << 12) - 1,
5178 .mX_max = (1 << 5) - 1, 5171 .mX_max = (1 << 5) - 1,
@@ -5198,6 +5191,8 @@ static const struct dss_pll_hw dss_omap4_dsi_pll_hw = {
5198}; 5191};
5199 5192
5200static const struct dss_pll_hw dss_omap5_dsi_pll_hw = { 5193static const struct dss_pll_hw dss_omap5_dsi_pll_hw = {
5194 .type = DSS_PLL_TYPE_A,
5195
5201 .n_max = (1 << 8) - 1, 5196 .n_max = (1 << 8) - 1,
5202 .m_max = (1 << 12) - 1, 5197 .m_max = (1 << 12) - 1,
5203 .mX_max = (1 << 5) - 1, 5198 .mX_max = (1 << 5) - 1,
diff --git a/drivers/gpu/drm/omapdrm/dss/dss-of.c b/drivers/gpu/drm/omapdrm/dss/dss-of.c
index bf407b6ba15c..dfd4e9621e3b 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss-of.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss-of.c
@@ -18,8 +18,7 @@
18#include <linux/of.h> 18#include <linux/of.h>
19#include <linux/seq_file.h> 19#include <linux/seq_file.h>
20 20
21#include <video/omapdss.h> 21#include "omapdss.h"
22
23#include "dss.h" 22#include "dss.h"
24 23
25struct device_node * 24struct device_node *
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index f95ff319e68e..14887d5b02e5 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -30,6 +30,7 @@
30#include <linux/delay.h> 30#include <linux/delay.h>
31#include <linux/seq_file.h> 31#include <linux/seq_file.h>
32#include <linux/clk.h> 32#include <linux/clk.h>
33#include <linux/pinctrl/consumer.h>
33#include <linux/platform_device.h> 34#include <linux/platform_device.h>
34#include <linux/pm_runtime.h> 35#include <linux/pm_runtime.h>
35#include <linux/gfp.h> 36#include <linux/gfp.h>
@@ -41,8 +42,7 @@
41#include <linux/suspend.h> 42#include <linux/suspend.h>
42#include <linux/component.h> 43#include <linux/component.h>
43 44
44#include <video/omapdss.h> 45#include "omapdss.h"
45
46#include "dss.h" 46#include "dss.h"
47#include "dss_features.h" 47#include "dss_features.h"
48 48
@@ -75,6 +75,8 @@ struct dss_features {
75 const enum omap_display_type *ports; 75 const enum omap_display_type *ports;
76 int num_ports; 76 int num_ports;
77 int (*dpi_select_source)(int port, enum omap_channel channel); 77 int (*dpi_select_source)(int port, enum omap_channel channel);
78 int (*select_lcd_source)(enum omap_channel channel,
79 enum dss_clk_source clk_src);
78}; 80};
79 81
80static struct { 82static struct {
@@ -91,9 +93,9 @@ static struct {
91 unsigned long cache_prate; 93 unsigned long cache_prate;
92 struct dispc_clock_info cache_dispc_cinfo; 94 struct dispc_clock_info cache_dispc_cinfo;
93 95
94 enum omap_dss_clk_source dsi_clk_source[MAX_NUM_DSI]; 96 enum dss_clk_source dsi_clk_source[MAX_NUM_DSI];
95 enum omap_dss_clk_source dispc_clk_source; 97 enum dss_clk_source dispc_clk_source;
96 enum omap_dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS]; 98 enum dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS];
97 99
98 bool ctx_valid; 100 bool ctx_valid;
99 u32 ctx[DSS_SZ_REGS / sizeof(u32)]; 101 u32 ctx[DSS_SZ_REGS / sizeof(u32)];
@@ -105,11 +107,14 @@ static struct {
105} dss; 107} dss;
106 108
107static const char * const dss_generic_clk_source_names[] = { 109static const char * const dss_generic_clk_source_names[] = {
108 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DSI_PLL_HSDIV_DISPC", 110 [DSS_CLK_SRC_FCK] = "FCK",
109 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DSI_PLL_HSDIV_DSI", 111 [DSS_CLK_SRC_PLL1_1] = "PLL1:1",
110 [OMAP_DSS_CLK_SRC_FCK] = "DSS_FCK", 112 [DSS_CLK_SRC_PLL1_2] = "PLL1:2",
111 [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC] = "DSI_PLL2_HSDIV_DISPC", 113 [DSS_CLK_SRC_PLL1_3] = "PLL1:3",
112 [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI] = "DSI_PLL2_HSDIV_DSI", 114 [DSS_CLK_SRC_PLL2_1] = "PLL2:1",
115 [DSS_CLK_SRC_PLL2_2] = "PLL2:2",
116 [DSS_CLK_SRC_PLL2_3] = "PLL2:3",
117 [DSS_CLK_SRC_HDMI_PLL] = "HDMI PLL",
113}; 118};
114 119
115static bool dss_initialized; 120static bool dss_initialized;
@@ -202,68 +207,70 @@ void dss_ctrl_pll_enable(enum dss_pll_id pll_id, bool enable)
202 1 << shift, val << shift); 207 1 << shift, val << shift);
203} 208}
204 209
205void dss_ctrl_pll_set_control_mux(enum dss_pll_id pll_id, 210static int dss_ctrl_pll_set_control_mux(enum dss_clk_source clk_src,
206 enum omap_channel channel) 211 enum omap_channel channel)
207{ 212{
208 unsigned shift, val; 213 unsigned shift, val;
209 214
210 if (!dss.syscon_pll_ctrl) 215 if (!dss.syscon_pll_ctrl)
211 return; 216 return -EINVAL;
212 217
213 switch (channel) { 218 switch (channel) {
214 case OMAP_DSS_CHANNEL_LCD: 219 case OMAP_DSS_CHANNEL_LCD:
215 shift = 3; 220 shift = 3;
216 221
217 switch (pll_id) { 222 switch (clk_src) {
218 case DSS_PLL_VIDEO1: 223 case DSS_CLK_SRC_PLL1_1:
219 val = 0; break; 224 val = 0; break;
220 case DSS_PLL_HDMI: 225 case DSS_CLK_SRC_HDMI_PLL:
221 val = 1; break; 226 val = 1; break;
222 default: 227 default:
223 DSSERR("error in PLL mux config for LCD\n"); 228 DSSERR("error in PLL mux config for LCD\n");
224 return; 229 return -EINVAL;
225 } 230 }
226 231
227 break; 232 break;
228 case OMAP_DSS_CHANNEL_LCD2: 233 case OMAP_DSS_CHANNEL_LCD2:
229 shift = 5; 234 shift = 5;
230 235
231 switch (pll_id) { 236 switch (clk_src) {
232 case DSS_PLL_VIDEO1: 237 case DSS_CLK_SRC_PLL1_3:
233 val = 0; break; 238 val = 0; break;
234 case DSS_PLL_VIDEO2: 239 case DSS_CLK_SRC_PLL2_3:
235 val = 1; break; 240 val = 1; break;
236 case DSS_PLL_HDMI: 241 case DSS_CLK_SRC_HDMI_PLL:
237 val = 2; break; 242 val = 2; break;
238 default: 243 default:
239 DSSERR("error in PLL mux config for LCD2\n"); 244 DSSERR("error in PLL mux config for LCD2\n");
240 return; 245 return -EINVAL;
241 } 246 }
242 247
243 break; 248 break;
244 case OMAP_DSS_CHANNEL_LCD3: 249 case OMAP_DSS_CHANNEL_LCD3:
245 shift = 7; 250 shift = 7;
246 251
247 switch (pll_id) { 252 switch (clk_src) {
248 case DSS_PLL_VIDEO1: 253 case DSS_CLK_SRC_PLL2_1:
249 val = 1; break;
250 case DSS_PLL_VIDEO2:
251 val = 0; break; 254 val = 0; break;
252 case DSS_PLL_HDMI: 255 case DSS_CLK_SRC_PLL1_3:
256 val = 1; break;
257 case DSS_CLK_SRC_HDMI_PLL:
253 val = 2; break; 258 val = 2; break;
254 default: 259 default:
255 DSSERR("error in PLL mux config for LCD3\n"); 260 DSSERR("error in PLL mux config for LCD3\n");
256 return; 261 return -EINVAL;
257 } 262 }
258 263
259 break; 264 break;
260 default: 265 default:
261 DSSERR("error in PLL mux config\n"); 266 DSSERR("error in PLL mux config\n");
262 return; 267 return -EINVAL;
263 } 268 }
264 269
265 regmap_update_bits(dss.syscon_pll_ctrl, dss.syscon_pll_ctrl_offset, 270 regmap_update_bits(dss.syscon_pll_ctrl, dss.syscon_pll_ctrl_offset,
266 0x3 << shift, val << shift); 271 0x3 << shift, val << shift);
272
273 return 0;
267} 274}
268 275
269void dss_sdi_init(int datapairs) 276void dss_sdi_init(int datapairs)
@@ -353,14 +360,14 @@ void dss_sdi_disable(void)
353 REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */ 360 REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */
354} 361}
355 362
356const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src) 363const char *dss_get_clk_source_name(enum dss_clk_source clk_src)
357{ 364{
358 return dss_generic_clk_source_names[clk_src]; 365 return dss_generic_clk_source_names[clk_src];
359} 366}
360 367
361void dss_dump_clocks(struct seq_file *s) 368void dss_dump_clocks(struct seq_file *s)
362{ 369{
363 const char *fclk_name, *fclk_real_name; 370 const char *fclk_name;
364 unsigned long fclk_rate; 371 unsigned long fclk_rate;
365 372
366 if (dss_runtime_get()) 373 if (dss_runtime_get())
@@ -368,12 +375,11 @@ void dss_dump_clocks(struct seq_file *s)
368 375
369 seq_printf(s, "- DSS -\n"); 376 seq_printf(s, "- DSS -\n");
370 377
371 fclk_name = dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_FCK); 378 fclk_name = dss_get_clk_source_name(DSS_CLK_SRC_FCK);
372 fclk_real_name = dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_FCK);
373 fclk_rate = clk_get_rate(dss.dss_clk); 379 fclk_rate = clk_get_rate(dss.dss_clk);
374 380
375 seq_printf(s, "%s (%s) = %lu\n", 381 seq_printf(s, "%s = %lu\n",
376 fclk_name, fclk_real_name, 382 fclk_name,
377 fclk_rate); 383 fclk_rate);
378 384
379 dss_runtime_put(); 385 dss_runtime_put();
@@ -402,19 +408,42 @@ static void dss_dump_regs(struct seq_file *s)
402#undef DUMPREG 408#undef DUMPREG
403} 409}
404 410
405static void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src) 411static int dss_get_channel_index(enum omap_channel channel)
412{
413 switch (channel) {
414 case OMAP_DSS_CHANNEL_LCD:
415 return 0;
416 case OMAP_DSS_CHANNEL_LCD2:
417 return 1;
418 case OMAP_DSS_CHANNEL_LCD3:
419 return 2;
420 default:
421 WARN_ON(1);
422 return 0;
423 }
424}
425
426static void dss_select_dispc_clk_source(enum dss_clk_source clk_src)
406{ 427{
407 int b; 428 int b;
408 u8 start, end; 429 u8 start, end;
409 430
431 /*
432 * We always use PRCM clock as the DISPC func clock, except on DSS3,
433 * where we don't have separate DISPC and LCD clock sources.
434 */
435 if (WARN_ON(dss_has_feature(FEAT_LCD_CLK_SRC) &&
436 clk_src != DSS_CLK_SRC_FCK))
437 return;
438
410 switch (clk_src) { 439 switch (clk_src) {
411 case OMAP_DSS_CLK_SRC_FCK: 440 case DSS_CLK_SRC_FCK:
412 b = 0; 441 b = 0;
413 break; 442 break;
414 case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: 443 case DSS_CLK_SRC_PLL1_1:
415 b = 1; 444 b = 1;
416 break; 445 break;
417 case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC: 446 case DSS_CLK_SRC_PLL2_1:
418 b = 2; 447 b = 2;
419 break; 448 break;
420 default: 449 default:
@@ -430,19 +459,19 @@ static void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src)
430} 459}
431 460
432void dss_select_dsi_clk_source(int dsi_module, 461void dss_select_dsi_clk_source(int dsi_module,
433 enum omap_dss_clk_source clk_src) 462 enum dss_clk_source clk_src)
434{ 463{
435 int b, pos; 464 int b, pos;
436 465
437 switch (clk_src) { 466 switch (clk_src) {
438 case OMAP_DSS_CLK_SRC_FCK: 467 case DSS_CLK_SRC_FCK:
439 b = 0; 468 b = 0;
440 break; 469 break;
441 case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI: 470 case DSS_CLK_SRC_PLL1_2:
442 BUG_ON(dsi_module != 0); 471 BUG_ON(dsi_module != 0);
443 b = 1; 472 b = 1;
444 break; 473 break;
445 case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI: 474 case DSS_CLK_SRC_PLL2_2:
446 BUG_ON(dsi_module != 1); 475 BUG_ON(dsi_module != 1);
447 b = 1; 476 b = 1;
448 break; 477 break;
@@ -457,59 +486,125 @@ void dss_select_dsi_clk_source(int dsi_module,
457 dss.dsi_clk_source[dsi_module] = clk_src; 486 dss.dsi_clk_source[dsi_module] = clk_src;
458} 487}
459 488
489static int dss_lcd_clk_mux_dra7(enum omap_channel channel,
490 enum dss_clk_source clk_src)
491{
492 const u8 ctrl_bits[] = {
493 [OMAP_DSS_CHANNEL_LCD] = 0,
494 [OMAP_DSS_CHANNEL_LCD2] = 12,
495 [OMAP_DSS_CHANNEL_LCD3] = 19,
496 };
497
498 u8 ctrl_bit = ctrl_bits[channel];
499 int r;
500
501 if (clk_src == DSS_CLK_SRC_FCK) {
502 /* LCDx_CLK_SWITCH */
503 REG_FLD_MOD(DSS_CONTROL, 0, ctrl_bit, ctrl_bit);
504 return -EINVAL;
505 }
506
507 r = dss_ctrl_pll_set_control_mux(clk_src, channel);
508 if (r)
509 return r;
510
511 REG_FLD_MOD(DSS_CONTROL, 1, ctrl_bit, ctrl_bit);
512
513 return 0;
514}
515
516static int dss_lcd_clk_mux_omap5(enum omap_channel channel,
517 enum dss_clk_source clk_src)
518{
519 const u8 ctrl_bits[] = {
520 [OMAP_DSS_CHANNEL_LCD] = 0,
521 [OMAP_DSS_CHANNEL_LCD2] = 12,
522 [OMAP_DSS_CHANNEL_LCD3] = 19,
523 };
524 const enum dss_clk_source allowed_plls[] = {
525 [OMAP_DSS_CHANNEL_LCD] = DSS_CLK_SRC_PLL1_1,
526 [OMAP_DSS_CHANNEL_LCD2] = DSS_CLK_SRC_FCK,
527 [OMAP_DSS_CHANNEL_LCD3] = DSS_CLK_SRC_PLL2_1,
528 };
529
530 u8 ctrl_bit = ctrl_bits[channel];
531
532 if (clk_src == DSS_CLK_SRC_FCK) {
533 /* LCDx_CLK_SWITCH */
534 REG_FLD_MOD(DSS_CONTROL, 0, ctrl_bit, ctrl_bit);
535 return -EINVAL;
536 }
537
538 if (WARN_ON(allowed_plls[channel] != clk_src))
539 return -EINVAL;
540
541 REG_FLD_MOD(DSS_CONTROL, 1, ctrl_bit, ctrl_bit);
542
543 return 0;
544}
545
546static int dss_lcd_clk_mux_omap4(enum omap_channel channel,
547 enum dss_clk_source clk_src)
548{
549 const u8 ctrl_bits[] = {
550 [OMAP_DSS_CHANNEL_LCD] = 0,
551 [OMAP_DSS_CHANNEL_LCD2] = 12,
552 };
553 const enum dss_clk_source allowed_plls[] = {
554 [OMAP_DSS_CHANNEL_LCD] = DSS_CLK_SRC_PLL1_1,
555 [OMAP_DSS_CHANNEL_LCD2] = DSS_CLK_SRC_PLL2_1,
556 };
557
558 u8 ctrl_bit = ctrl_bits[channel];
559
560 if (clk_src == DSS_CLK_SRC_FCK) {
561 /* LCDx_CLK_SWITCH */
562 REG_FLD_MOD(DSS_CONTROL, 0, ctrl_bit, ctrl_bit);
563 return 0;
564 }
565
566 if (WARN_ON(allowed_plls[channel] != clk_src))
567 return -EINVAL;
568
569 REG_FLD_MOD(DSS_CONTROL, 1, ctrl_bit, ctrl_bit);
570
571 return 0;
572}
573
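/*
 * Usage sketch for the per-SoC mux helpers above: on OMAP4, routing LCD2
 * to the second PLL is now
 *
 *	dss_select_lcd_clk_source(OMAP_DSS_CHANNEL_LCD2, DSS_CLK_SRC_PLL2_1);
 *
 * which dispatches through dss.feat->select_lcd_source to
 * dss_lcd_clk_mux_omap4(), sets bit 12 (LCD2_CLK_SWITCH) in DSS_CONTROL
 * and records the source in dss.lcd_clk_source[1].
 */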
460void dss_select_lcd_clk_source(enum omap_channel channel, 574void dss_select_lcd_clk_source(enum omap_channel channel,
461 enum omap_dss_clk_source clk_src) 575 enum dss_clk_source clk_src)
462{ 576{
463 int b, ix, pos; 577 int idx = dss_get_channel_index(channel);
578 int r;
464 579
465 if (!dss_has_feature(FEAT_LCD_CLK_SRC)) { 580 if (!dss_has_feature(FEAT_LCD_CLK_SRC)) {
466 dss_select_dispc_clk_source(clk_src); 581 dss_select_dispc_clk_source(clk_src);
582 dss.lcd_clk_source[idx] = clk_src;
467 return; 583 return;
468 } 584 }
469 585
470 switch (clk_src) { 586 r = dss.feat->select_lcd_source(channel, clk_src);
471 case OMAP_DSS_CLK_SRC_FCK: 587 if (r)
472 b = 0;
473 break;
474 case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
475 BUG_ON(channel != OMAP_DSS_CHANNEL_LCD);
476 b = 1;
477 break;
478 case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
479 BUG_ON(channel != OMAP_DSS_CHANNEL_LCD2 &&
480 channel != OMAP_DSS_CHANNEL_LCD3);
481 b = 1;
482 break;
483 default:
484 BUG();
485 return; 588 return;
486 }
487
488 pos = channel == OMAP_DSS_CHANNEL_LCD ? 0 :
489 (channel == OMAP_DSS_CHANNEL_LCD2 ? 12 : 19);
490 REG_FLD_MOD(DSS_CONTROL, b, pos, pos); /* LCDx_CLK_SWITCH */
491 589
492 ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 590 dss.lcd_clk_source[idx] = clk_src;
493 (channel == OMAP_DSS_CHANNEL_LCD2 ? 1 : 2);
494 dss.lcd_clk_source[ix] = clk_src;
495} 591}
496 592
497enum omap_dss_clk_source dss_get_dispc_clk_source(void) 593enum dss_clk_source dss_get_dispc_clk_source(void)
498{ 594{
499 return dss.dispc_clk_source; 595 return dss.dispc_clk_source;
500} 596}
501 597
502enum omap_dss_clk_source dss_get_dsi_clk_source(int dsi_module) 598enum dss_clk_source dss_get_dsi_clk_source(int dsi_module)
503{ 599{
504 return dss.dsi_clk_source[dsi_module]; 600 return dss.dsi_clk_source[dsi_module];
505} 601}
506 602
507enum omap_dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel) 603enum dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel)
508{ 604{
509 if (dss_has_feature(FEAT_LCD_CLK_SRC)) { 605 if (dss_has_feature(FEAT_LCD_CLK_SRC)) {
510 int ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 606 int idx = dss_get_channel_index(channel);
511 (channel == OMAP_DSS_CHANNEL_LCD2 ? 1 : 2); 607 return dss.lcd_clk_source[idx];
512 return dss.lcd_clk_source[ix];
513 } else { 608 } else {
514 /* LCD_CLK source is the same as DISPC_FCLK source for 609 /* LCD_CLK source is the same as DISPC_FCLK source for
515 * OMAP2 and OMAP3 */ 610 * OMAP2 and OMAP3 */
@@ -858,6 +953,7 @@ static const struct dss_features omap44xx_dss_feats = {
858 .dpi_select_source = &dss_dpi_select_source_omap4, 953 .dpi_select_source = &dss_dpi_select_source_omap4,
859 .ports = omap2plus_ports, 954 .ports = omap2plus_ports,
860 .num_ports = ARRAY_SIZE(omap2plus_ports), 955 .num_ports = ARRAY_SIZE(omap2plus_ports),
956 .select_lcd_source = &dss_lcd_clk_mux_omap4,
861}; 957};
862 958
863static const struct dss_features omap54xx_dss_feats = { 959static const struct dss_features omap54xx_dss_feats = {
@@ -867,6 +963,7 @@ static const struct dss_features omap54xx_dss_feats = {
867 .dpi_select_source = &dss_dpi_select_source_omap5, 963 .dpi_select_source = &dss_dpi_select_source_omap5,
868 .ports = omap2plus_ports, 964 .ports = omap2plus_ports,
869 .num_ports = ARRAY_SIZE(omap2plus_ports), 965 .num_ports = ARRAY_SIZE(omap2plus_ports),
966 .select_lcd_source = &dss_lcd_clk_mux_omap5,
870}; 967};
871 968
872static const struct dss_features am43xx_dss_feats = { 969static const struct dss_features am43xx_dss_feats = {
@@ -885,6 +982,7 @@ static const struct dss_features dra7xx_dss_feats = {
885 .dpi_select_source = &dss_dpi_select_source_dra7xx, 982 .dpi_select_source = &dss_dpi_select_source_dra7xx,
886 .ports = dra7xx_ports, 983 .ports = dra7xx_ports,
887 .num_ports = ARRAY_SIZE(dra7xx_ports), 984 .num_ports = ARRAY_SIZE(dra7xx_ports),
985 .select_lcd_source = &dss_lcd_clk_mux_dra7,
888}; 986};
889 987
890static int dss_init_features(struct platform_device *pdev) 988static int dss_init_features(struct platform_device *pdev)
@@ -1142,18 +1240,18 @@ static int dss_bind(struct device *dev)
1142 /* Select DPLL */ 1240 /* Select DPLL */
1143 REG_FLD_MOD(DSS_CONTROL, 0, 0, 0); 1241 REG_FLD_MOD(DSS_CONTROL, 0, 0, 0);
1144 1242
1145 dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK); 1243 dss_select_dispc_clk_source(DSS_CLK_SRC_FCK);
1146 1244
1147#ifdef CONFIG_OMAP2_DSS_VENC 1245#ifdef CONFIG_OMAP2_DSS_VENC
1148 REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */ 1246 REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */
1149 REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */ 1247 REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */
1150 REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */ 1248 REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */
1151#endif 1249#endif
1152 dss.dsi_clk_source[0] = OMAP_DSS_CLK_SRC_FCK; 1250 dss.dsi_clk_source[0] = DSS_CLK_SRC_FCK;
1153 dss.dsi_clk_source[1] = OMAP_DSS_CLK_SRC_FCK; 1251 dss.dsi_clk_source[1] = DSS_CLK_SRC_FCK;
1154 dss.dispc_clk_source = OMAP_DSS_CLK_SRC_FCK; 1252 dss.dispc_clk_source = DSS_CLK_SRC_FCK;
1155 dss.lcd_clk_source[0] = OMAP_DSS_CLK_SRC_FCK; 1253 dss.lcd_clk_source[0] = DSS_CLK_SRC_FCK;
1156 dss.lcd_clk_source[1] = OMAP_DSS_CLK_SRC_FCK; 1254 dss.lcd_clk_source[1] = DSS_CLK_SRC_FCK;
1157 1255
1158 rev = dss_read_reg(DSS_REVISION); 1256 rev = dss_read_reg(DSS_REVISION);
1159 printk(KERN_INFO "OMAP DSS rev %d.%d\n", 1257 printk(KERN_INFO "OMAP DSS rev %d.%d\n",
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h
index 38e6ab50142d..4fd06dc41cb3 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.h
+++ b/drivers/gpu/drm/omapdrm/dss/dss.h
@@ -102,6 +102,20 @@ enum dss_writeback_channel {
102 DSS_WB_LCD3_MGR = 7, 102 DSS_WB_LCD3_MGR = 7,
103}; 103};
104 104
105enum dss_clk_source {
106 DSS_CLK_SRC_FCK = 0,
107
108 DSS_CLK_SRC_PLL1_1,
109 DSS_CLK_SRC_PLL1_2,
110 DSS_CLK_SRC_PLL1_3,
111
112 DSS_CLK_SRC_PLL2_1,
113 DSS_CLK_SRC_PLL2_2,
114 DSS_CLK_SRC_PLL2_3,
115
116 DSS_CLK_SRC_HDMI_PLL,
117};
118
105enum dss_pll_id { 119enum dss_pll_id {
106 DSS_PLL_DSI1, 120 DSS_PLL_DSI1,
107 DSS_PLL_DSI2, 121 DSS_PLL_DSI2,
@@ -114,6 +128,11 @@ struct dss_pll;
114 128
115#define DSS_PLL_MAX_HSDIVS 4 129#define DSS_PLL_MAX_HSDIVS 4
116 130
131enum dss_pll_type {
132 DSS_PLL_TYPE_A,
133 DSS_PLL_TYPE_B,
134};
135
117/* 136/*
118 * Type-A PLLs: clkout[]/mX[] refer to hsdiv outputs m4, m5, m6, m7. 137 * Type-A PLLs: clkout[]/mX[] refer to hsdiv outputs m4, m5, m6, m7.
119 * Type-B PLLs: clkout[0] refers to m2. 138 * Type-B PLLs: clkout[0] refers to m2.
@@ -140,6 +159,8 @@ struct dss_pll_ops {
140}; 159};
141 160
142struct dss_pll_hw { 161struct dss_pll_hw {
162 enum dss_pll_type type;
163
143 unsigned n_max; 164 unsigned n_max;
144 unsigned m_min; 165 unsigned m_min;
145 unsigned m_max; 166 unsigned m_max;
@@ -227,7 +248,7 @@ unsigned long dss_get_dispc_clk_rate(void);
227int dss_dpi_select_source(int port, enum omap_channel channel); 248int dss_dpi_select_source(int port, enum omap_channel channel);
228void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select); 249void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select);
229enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void); 250enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void);
230const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src); 251const char *dss_get_clk_source_name(enum dss_clk_source clk_src);
231void dss_dump_clocks(struct seq_file *s); 252void dss_dump_clocks(struct seq_file *s);
232 253
233/* DSS VIDEO PLL */ 254/* DSS VIDEO PLL */
@@ -244,20 +265,18 @@ void dss_debug_dump_clocks(struct seq_file *s);
 #endif
 
 void dss_ctrl_pll_enable(enum dss_pll_id pll_id, bool enable);
-void dss_ctrl_pll_set_control_mux(enum dss_pll_id pll_id,
-		enum omap_channel channel);
 
 void dss_sdi_init(int datapairs);
 int dss_sdi_enable(void);
 void dss_sdi_disable(void);
 
 void dss_select_dsi_clk_source(int dsi_module,
-		enum omap_dss_clk_source clk_src);
+		enum dss_clk_source clk_src);
 void dss_select_lcd_clk_source(enum omap_channel channel,
-		enum omap_dss_clk_source clk_src);
-enum omap_dss_clk_source dss_get_dispc_clk_source(void);
-enum omap_dss_clk_source dss_get_dsi_clk_source(int dsi_module);
-enum omap_dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel);
+		enum dss_clk_source clk_src);
+enum dss_clk_source dss_get_dispc_clk_source(void);
+enum dss_clk_source dss_get_dsi_clk_source(int dsi_module);
+enum dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel);
 
 void dss_set_venc_output(enum omap_dss_venc_type type);
 void dss_set_dac_pwrdn_bgz(bool enable);
@@ -409,17 +428,23 @@ typedef bool (*dss_hsdiv_calc_func)(int m_dispc, unsigned long dispc,
 int dss_pll_register(struct dss_pll *pll);
 void dss_pll_unregister(struct dss_pll *pll);
 struct dss_pll *dss_pll_find(const char *name);
+struct dss_pll *dss_pll_find_by_src(enum dss_clk_source src);
+unsigned dss_pll_get_clkout_idx_for_src(enum dss_clk_source src);
 int dss_pll_enable(struct dss_pll *pll);
 void dss_pll_disable(struct dss_pll *pll);
 int dss_pll_set_config(struct dss_pll *pll,
 		const struct dss_pll_clock_info *cinfo);
 
-bool dss_pll_hsdiv_calc(const struct dss_pll *pll, unsigned long clkdco,
+bool dss_pll_hsdiv_calc_a(const struct dss_pll *pll, unsigned long clkdco,
 		unsigned long out_min, unsigned long out_max,
 		dss_hsdiv_calc_func func, void *data);
-bool dss_pll_calc(const struct dss_pll *pll, unsigned long clkin,
+bool dss_pll_calc_a(const struct dss_pll *pll, unsigned long clkin,
 		unsigned long pll_min, unsigned long pll_max,
 		dss_pll_calc_func func, void *data);
+
+bool dss_pll_calc_b(const struct dss_pll *pll, unsigned long clkin,
+		unsigned long target_clkout, struct dss_pll_clock_info *cinfo);
+
 int dss_pll_write_config_type_a(struct dss_pll *pll,
 		const struct dss_pll_clock_info *cinfo);
 int dss_pll_write_config_type_b(struct dss_pll *pll,
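The pair of lookup helpers declared above lets a consumer resolve a generic clock source to the PLL instance and to the hsdiv/M2 output feeding it. A minimal sketch of the intended use, assuming struct dss_pll caches its configured rates in cinfo.clkout[] as elsewhere in this driver:

	/* sketch: resolve the clock rate behind a generic clock source */
	static unsigned long dss_clk_src_rate(enum dss_clk_source src)
	{
		struct dss_pll *pll = dss_pll_find_by_src(src);
		unsigned idx;

		if (!pll)	/* DSS_CLK_SRC_FCK: not driven by a DSS PLL */
			return 0;

		idx = dss_pll_get_clkout_idx_for_src(src);
		return pll->cinfo.clkout[idx];
	}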
diff --git a/drivers/gpu/drm/omapdrm/dss/dss_features.c b/drivers/gpu/drm/omapdrm/dss/dss_features.c
index c886a2927f73..ee5b93ce2763 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss_features.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss_features.c
@@ -23,8 +23,7 @@
 #include <linux/err.h>
 #include <linux/slab.h>
 
-#include <video/omapdss.h>
-
+#include "omapdss.h"
 #include "dss.h"
 #include "dss_features.h"
 
@@ -50,7 +49,6 @@ struct omap_dss_features {
 	const enum omap_dss_output_id *supported_outputs;
 	const enum omap_color_mode *supported_color_modes;
 	const enum omap_overlay_caps *overlay_caps;
-	const char * const *clksrc_names;
 	const struct dss_param_range *dss_params;
 
 	const enum omap_dss_rotation_type supported_rotation_types;
@@ -389,34 +387,6 @@ static const enum omap_overlay_caps omap4_dss_overlay_caps[] = {
 		OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
 };
 
-static const char * const omap2_dss_clk_source_names[] = {
-	[OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC]	= "N/A",
-	[OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI]	= "N/A",
-	[OMAP_DSS_CLK_SRC_FCK]			= "DSS_FCLK1",
-};
-
-static const char * const omap3_dss_clk_source_names[] = {
-	[OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC]	= "DSI1_PLL_FCLK",
-	[OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI]	= "DSI2_PLL_FCLK",
-	[OMAP_DSS_CLK_SRC_FCK]			= "DSS1_ALWON_FCLK",
-};
-
-static const char * const omap4_dss_clk_source_names[] = {
-	[OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC]	= "PLL1_CLK1",
-	[OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI]	= "PLL1_CLK2",
-	[OMAP_DSS_CLK_SRC_FCK]			= "DSS_FCLK",
-	[OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC]	= "PLL2_CLK1",
-	[OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI]	= "PLL2_CLK2",
-};
-
-static const char * const omap5_dss_clk_source_names[] = {
-	[OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC]	= "DPLL_DSI1_A_CLK1",
-	[OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI]	= "DPLL_DSI1_A_CLK2",
-	[OMAP_DSS_CLK_SRC_FCK]			= "DSS_CLK",
-	[OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC]	= "DPLL_DSI1_C_CLK1",
-	[OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI]	= "DPLL_DSI1_C_CLK2",
-};
-
 static const struct dss_param_range omap2_dss_param_range[] = {
 	[FEAT_PARAM_DSS_FCK] = { 0, 133000000 },
 	[FEAT_PARAM_DSS_PCD] = { 2, 255 },
@@ -631,7 +601,6 @@ static const struct omap_dss_features omap2_dss_features = {
 	.supported_outputs = omap2_dss_supported_outputs,
 	.supported_color_modes = omap2_dss_supported_color_modes,
 	.overlay_caps = omap2_dss_overlay_caps,
-	.clksrc_names = omap2_dss_clk_source_names,
 	.dss_params = omap2_dss_param_range,
 	.supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
 	.buffer_size_unit = 1,
@@ -652,7 +621,6 @@ static const struct omap_dss_features omap3430_dss_features = {
 	.supported_outputs = omap3430_dss_supported_outputs,
 	.supported_color_modes = omap3_dss_supported_color_modes,
 	.overlay_caps = omap3430_dss_overlay_caps,
-	.clksrc_names = omap3_dss_clk_source_names,
 	.dss_params = omap3_dss_param_range,
 	.supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
 	.buffer_size_unit = 1,
@@ -676,7 +644,6 @@ static const struct omap_dss_features am35xx_dss_features = {
 	.supported_outputs = omap3430_dss_supported_outputs,
 	.supported_color_modes = omap3_dss_supported_color_modes,
 	.overlay_caps = omap3430_dss_overlay_caps,
-	.clksrc_names = omap3_dss_clk_source_names,
 	.dss_params = omap3_dss_param_range,
 	.supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
 	.buffer_size_unit = 1,
@@ -696,7 +663,6 @@ static const struct omap_dss_features am43xx_dss_features = {
 	.supported_outputs = am43xx_dss_supported_outputs,
 	.supported_color_modes = omap3_dss_supported_color_modes,
 	.overlay_caps = omap3430_dss_overlay_caps,
-	.clksrc_names = omap2_dss_clk_source_names,
 	.dss_params = am43xx_dss_param_range,
 	.supported_rotation_types = OMAP_DSS_ROT_DMA,
 	.buffer_size_unit = 1,
@@ -716,7 +682,6 @@ static const struct omap_dss_features omap3630_dss_features = {
 	.supported_outputs = omap3630_dss_supported_outputs,
 	.supported_color_modes = omap3_dss_supported_color_modes,
 	.overlay_caps = omap3630_dss_overlay_caps,
-	.clksrc_names = omap3_dss_clk_source_names,
 	.dss_params = omap3_dss_param_range,
 	.supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
 	.buffer_size_unit = 1,
@@ -738,7 +703,6 @@ static const struct omap_dss_features omap4430_es1_0_dss_features = {
 	.supported_outputs = omap4_dss_supported_outputs,
 	.supported_color_modes = omap4_dss_supported_color_modes,
 	.overlay_caps = omap4_dss_overlay_caps,
-	.clksrc_names = omap4_dss_clk_source_names,
 	.dss_params = omap4_dss_param_range,
 	.supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
 	.buffer_size_unit = 16,
@@ -759,7 +723,6 @@ static const struct omap_dss_features omap4430_es2_0_1_2_dss_features = {
 	.supported_outputs = omap4_dss_supported_outputs,
 	.supported_color_modes = omap4_dss_supported_color_modes,
 	.overlay_caps = omap4_dss_overlay_caps,
-	.clksrc_names = omap4_dss_clk_source_names,
 	.dss_params = omap4_dss_param_range,
 	.supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
 	.buffer_size_unit = 16,
@@ -780,7 +743,6 @@ static const struct omap_dss_features omap4_dss_features = {
 	.supported_outputs = omap4_dss_supported_outputs,
 	.supported_color_modes = omap4_dss_supported_color_modes,
 	.overlay_caps = omap4_dss_overlay_caps,
-	.clksrc_names = omap4_dss_clk_source_names,
 	.dss_params = omap4_dss_param_range,
 	.supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
 	.buffer_size_unit = 16,
@@ -801,7 +763,6 @@ static const struct omap_dss_features omap5_dss_features = {
 	.supported_outputs = omap5_dss_supported_outputs,
 	.supported_color_modes = omap4_dss_supported_color_modes,
 	.overlay_caps = omap4_dss_overlay_caps,
-	.clksrc_names = omap5_dss_clk_source_names,
 	.dss_params = omap5_dss_param_range,
 	.supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
 	.buffer_size_unit = 16,
@@ -859,11 +820,6 @@ bool dss_feat_color_mode_supported(enum omap_plane plane,
 		color_mode;
 }
 
-const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id)
-{
-	return omap_current_dss_features->clksrc_names[id];
-}
-
 u32 dss_feat_get_buffer_size_unit(void)
 {
 	return omap_current_dss_features->buffer_size_unit;
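With the per-SoC string tables gone from dss_features.c, the naming presumably moves next to the generic enum, where one SoC-independent table suffices. The replacement is not part of this hunk; a hypothetical shape, matching the renamed dss_get_clk_source_name() declared in dss.h:

	/* hypothetical generic replacement for the per-SoC clksrc_names tables */
	static const char * const dss_generic_clk_source_names[] = {
		[DSS_CLK_SRC_FCK]	= "FCK",
		[DSS_CLK_SRC_PLL1_1]	= "PLL1:1",
		[DSS_CLK_SRC_PLL1_2]	= "PLL1:2",
		[DSS_CLK_SRC_PLL1_3]	= "PLL1:3",
		[DSS_CLK_SRC_PLL2_1]	= "PLL2:1",
		[DSS_CLK_SRC_PLL2_2]	= "PLL2:2",
		[DSS_CLK_SRC_PLL2_3]	= "PLL2:3",
		[DSS_CLK_SRC_HDMI_PLL]	= "HDMI PLL",
	};

	const char *dss_get_clk_source_name(enum dss_clk_source clk_src)
	{
		return dss_generic_clk_source_names[clk_src];
	}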
diff --git a/drivers/gpu/drm/omapdrm/dss/dss_features.h b/drivers/gpu/drm/omapdrm/dss/dss_features.h
index 3d67d39f192f..bb4b7f0e642b 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss_features.h
+++ b/drivers/gpu/drm/omapdrm/dss/dss_features.h
@@ -91,7 +91,6 @@ unsigned long dss_feat_get_param_max(enum dss_range_param param);
 enum omap_overlay_caps dss_feat_get_overlay_caps(enum omap_plane plane);
 bool dss_feat_color_mode_supported(enum omap_plane plane,
 		enum omap_color_mode color_mode);
-const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id);
 
 u32 dss_feat_get_buffer_size_unit(void);	/* in bytes */
 u32 dss_feat_get_burst_size_unit(void);	/* in bytes */
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi.h b/drivers/gpu/drm/omapdrm/dss/hdmi.h
index 53616b02b613..63e711545865 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi.h
@@ -23,8 +23,9 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/hdmi.h>
-#include <video/omapdss.h>
+#include <sound/omap-hdmi-audio.h>
 
+#include "omapdss.h"
 #include "dss.h"
 
 /* HDMI Wrapper */
@@ -240,6 +241,7 @@ struct hdmi_pll_data {
 
 	void __iomem *base;
 
+	struct platform_device *pdev;
 	struct hdmi_wp_data *wp;
 };
 
@@ -306,8 +308,6 @@ phys_addr_t hdmi_wp_get_audio_dma_addr(struct hdmi_wp_data *wp);
 
 /* HDMI PLL funcs */
 void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s);
-void hdmi_pll_compute(struct hdmi_pll_data *pll,
-	unsigned long target_tmds, struct dss_pll_clock_info *pi);
 int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll,
 	struct hdmi_wp_data *wp);
 void hdmi_pll_uninit(struct hdmi_pll_data *hpll);
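struct hdmi_pll_data gaining a pdev pointer is what lets the generic dss_pll ops, which only see a struct dss_pll, reach the HDMI platform device for runtime PM. The recovery step is the usual container_of() idiom, as hdmi_pll.c does below (sketch, assuming hdmi.h is in scope):

	/* sketch: recover the HDMI PLL wrapper from the generic PLL handle */
	static struct hdmi_pll_data *to_hdmi_pll(struct dss_pll *dsspll)
	{
		return container_of(dsspll, struct hdmi_pll_data, pll);
	}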
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index f892ae157ff3..cbd28dfdb86a 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -33,9 +33,10 @@
 #include <linux/gpio.h>
 #include <linux/regulator/consumer.h>
 #include <linux/component.h>
-#include <video/omapdss.h>
+#include <linux/of.h>
 #include <sound/omap-hdmi-audio.h>
 
+#include "omapdss.h"
 #include "hdmi4_core.h"
 #include "dss.h"
 #include "dss_features.h"
@@ -100,7 +101,6 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data)
 
 static int hdmi_init_regulator(void)
 {
-	int r;
 	struct regulator *reg;
 
 	if (hdmi.vdda_reg != NULL)
@@ -114,15 +114,6 @@ static int hdmi_init_regulator(void)
 		return PTR_ERR(reg);
 	}
 
-	if (regulator_can_change_voltage(reg)) {
-		r = regulator_set_voltage(reg, 1800000, 1800000);
-		if (r) {
-			devm_regulator_put(reg);
-			DSSWARN("can't set the regulator voltage\n");
-			return r;
-		}
-	}
-
 	hdmi.vdda_reg = reg;
 
 	return 0;
@@ -186,7 +177,11 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
 	if (p->double_pixel)
 		pc *= 2;
 
-	hdmi_pll_compute(&hdmi.pll, pc, &hdmi_cinfo);
+	/* DSS_HDMI_TCLK is bitclk / 10 */
+	pc *= 10;
+
+	dss_pll_calc_b(&hdmi.pll.pll, clk_get_rate(hdmi.pll.pll.clkin),
+		pc, &hdmi_cinfo);
 
 	r = dss_pll_enable(&hdmi.pll.pll);
 	if (r) {
@@ -213,9 +208,6 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
 
 	hdmi4_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg);
 
-	/* bypass TV gamma table */
-	dispc_enable_gamma_table(0);
-
 	/* tv size */
 	dss_mgr_set_timings(channel, p);
 
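The caller now hands the generic type-B calculator the TMDS bit clock rather than the character rate, since the old hdmi_pll_compute() carried that factor internally. A worked example with an assumed mode (not from the patch):

	/* worked example: 1080p60 CEA timing */
	unsigned long pc = 148500000;	/* pixel clock, Hz */

	/* deep color / double_pixel would scale pc first, as above */
	pc *= 10;			/* TMDS bit clock: 1485000000 Hz */

	/* dss_pll_calc_b() then solves n/m/mf/m2 so clkout ~= 1.485 GHz */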
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
index fa72e735dad2..ef3afe99e487 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
@@ -211,7 +211,7 @@ static void hdmi_core_init(struct hdmi_core_video_config *video_cfg)
 static void hdmi_core_powerdown_disable(struct hdmi_core_data *core)
 {
 	DSSDBG("Enter hdmi_core_powerdown_disable\n");
-	REG_FLD_MOD(core->base, HDMI_CORE_SYS_SYS_CTRL1, 0x0, 0, 0);
+	REG_FLD_MOD(core->base, HDMI_CORE_SYS_SYS_CTRL1, 0x1, 0, 0);
 }
 
 static void hdmi_core_swreset_release(struct hdmi_core_data *core)
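This is a one-bit fix: REG_FLD_MOD() is a read-modify-write of the bit field [start:end], here the power-down control at bit 0 of SYS_CTRL1. Writing 1 takes the core out of power-down, which is what the function's name promises; the old 0x0 left it powered down. For reference, the field helpers behind the macro work roughly like this (a sketch of the driver's semantics):

	/* sketch: replace bits [start..end] of a register value with val */
	#define FLD_MASK(start, end)	(((1 << ((start) - (end) + 1)) - 1) << (end))
	#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
	#define FLD_MOD(orig, val, start, end) \
		(((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))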
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index a43f7b10e113..0c0a5139a301 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -38,9 +38,10 @@
 #include <linux/gpio.h>
 #include <linux/regulator/consumer.h>
 #include <linux/component.h>
-#include <video/omapdss.h>
+#include <linux/of.h>
 #include <sound/omap-hdmi-audio.h>
 
+#include "omapdss.h"
 #include "hdmi5_core.h"
 #include "dss.h"
 #include "dss_features.h"
@@ -119,7 +120,6 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data)
 
 static int hdmi_init_regulator(void)
 {
-	int r;
 	struct regulator *reg;
 
 	if (hdmi.vdda_reg != NULL)
@@ -131,15 +131,6 @@ static int hdmi_init_regulator(void)
 		return PTR_ERR(reg);
 	}
 
-	if (regulator_can_change_voltage(reg)) {
-		r = regulator_set_voltage(reg, 1800000, 1800000);
-		if (r) {
-			devm_regulator_put(reg);
-			DSSWARN("can't set the regulator voltage\n");
-			return r;
-		}
-	}
-
 	hdmi.vdda_reg = reg;
 
 	return 0;
@@ -198,7 +189,11 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
 	if (p->double_pixel)
 		pc *= 2;
 
-	hdmi_pll_compute(&hdmi.pll, pc, &hdmi_cinfo);
+	/* DSS_HDMI_TCLK is bitclk / 10 */
+	pc *= 10;
+
+	dss_pll_calc_b(&hdmi.pll.pll, clk_get_rate(hdmi.pll.pll.clkin),
+		pc, &hdmi_cinfo);
 
 	/* disable and clear irqs */
 	hdmi_wp_clear_irqenable(&hdmi.wp, 0xffffffff);
@@ -230,9 +225,6 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
 
 	hdmi5_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg);
 
-	/* bypass TV gamma table */
-	dispc_enable_gamma_table(0);
-
 	/* tv size */
 	dss_mgr_set_timings(channel, p);
 
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
index 6a397520cae5..8ab2093daa12 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
@@ -51,8 +51,8 @@ static void hdmi_core_ddc_init(struct hdmi_core_data *core)
 {
 	void __iomem *base = core->base;
 	const unsigned long long iclk = 266000000;	/* DSS L3 ICLK */
-	const unsigned ss_scl_high = 4000;		/* ns */
-	const unsigned ss_scl_low = 4700;		/* ns */
+	const unsigned ss_scl_high = 4600;		/* ns */
+	const unsigned ss_scl_low = 5400;		/* ns */
 	const unsigned fs_scl_high = 600;		/* ns */
 	const unsigned fs_scl_low = 1300;		/* ns */
 	const unsigned sda_hold = 1000;			/* ns */
@@ -458,7 +458,7 @@ static void hdmi_core_write_avi_infoframe(struct hdmi_core_data *core,
 
 	c = (ptr[1] >> 6) & 0x3;
 	m = (ptr[1] >> 4) & 0x3;
-	r = (ptr[1] >> 0) & 0x3;
+	r = (ptr[1] >> 0) & 0xf;
 
 	itc = (ptr[2] >> 7) & 0x1;
 	ec = (ptr[2] >> 4) & 0x7;
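Two distinct fixes in hdmi5_core.c. First, the DDC SCL timings: the old 4000/4700 ns values sat exactly at the I2C standard-mode minimums, so the new numbers add margin; they are converted to counter values against the 266 MHz interface clock, in effect:

	/* sketch: ns-to-ticks conversion for the DDC SCL counters */
	static unsigned ddc_ns_to_ticks(unsigned long long iclk_hz, unsigned ns)
	{
		/* e.g. 5400 ns at 266 MHz -> 1437 ticks (rounded up) */
		return DIV_ROUND_UP_ULL(iclk_hz * ns, 1000000000ULL);
	}

Second, the AVI infoframe parse: per CEA-861 the R0..R3 active-format-aspect code occupies four bits of the second payload byte, so the old mask of 0x3 silently truncated half the field; 0xf recovers all of it.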
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_common.c b/drivers/gpu/drm/omapdrm/dss/hdmi_common.c
index 1b8fcc6c4ba1..4dfb67fe5f6d 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_common.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_common.c
@@ -4,8 +4,8 @@
 #include <linux/kernel.h>
 #include <linux/err.h>
 #include <linux/of.h>
-#include <video/omapdss.h>
 
+#include "omapdss.h"
 #include "hdmi.h"
 
 int hdmi_parse_lanes_of(struct platform_device *pdev, struct device_node *ep,
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
index 1f5d19c119ce..3ead47cccac5 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
@@ -13,8 +13,9 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
-#include <video/omapdss.h>
+#include <linux/seq_file.h>
 
+#include "omapdss.h"
 #include "dss.h"
 #include "hdmi.h"
 
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
index 06e23a7c432c..b8bf6a9e5557 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
@@ -16,9 +16,10 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
+#include <linux/seq_file.h>
+#include <linux/pm_runtime.h>
 
-#include <video/omapdss.h>
-
+#include "omapdss.h"
 #include "dss.h"
 #include "hdmi.h"
 
@@ -38,71 +39,14 @@ void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s)
 	DUMPPLL(PLLCTRL_CFG4);
 }
 
-void hdmi_pll_compute(struct hdmi_pll_data *pll,
-	unsigned long target_tmds, struct dss_pll_clock_info *pi)
-{
-	unsigned long fint, clkdco, clkout;
-	unsigned long target_bitclk, target_clkdco;
-	unsigned long min_dco;
-	unsigned n, m, mf, m2, sd;
-	unsigned long clkin;
-	const struct dss_pll_hw *hw = pll->pll.hw;
-
-	clkin = clk_get_rate(pll->pll.clkin);
-
-	DSSDBG("clkin %lu, target tmds %lu\n", clkin, target_tmds);
-
-	target_bitclk = target_tmds * 10;
-
-	/* Fint */
-	n = DIV_ROUND_UP(clkin, hw->fint_max);
-	fint = clkin / n;
-
-	/* adjust m2 so that the clkdco will be high enough */
-	min_dco = roundup(hw->clkdco_min, fint);
-	m2 = DIV_ROUND_UP(min_dco, target_bitclk);
-	if (m2 == 0)
-		m2 = 1;
-
-	target_clkdco = target_bitclk * m2;
-	m = target_clkdco / fint;
-
-	clkdco = fint * m;
-
-	/* adjust clkdco with fractional mf */
-	if (WARN_ON(target_clkdco - clkdco > fint))
-		mf = 0;
-	else
-		mf = (u32)div_u64(262144ull * (target_clkdco - clkdco), fint);
-
-	if (mf > 0)
-		clkdco += (u32)div_u64((u64)mf * fint, 262144);
-
-	clkout = clkdco / m2;
-
-	/* sigma-delta */
-	sd = DIV_ROUND_UP(fint * m, 250000000);
-
-	DSSDBG("N = %u, M = %u, M.f = %u, M2 = %u, SD = %u\n",
-		n, m, mf, m2, sd);
-	DSSDBG("Fint %lu, clkdco %lu, clkout %lu\n", fint, clkdco, clkout);
-
-	pi->n = n;
-	pi->m = m;
-	pi->mf = mf;
-	pi->mX[0] = m2;
-	pi->sd = sd;
-
-	pi->fint = fint;
-	pi->clkdco = clkdco;
-	pi->clkout[0] = clkout;
-}
-
 static int hdmi_pll_enable(struct dss_pll *dsspll)
 {
 	struct hdmi_pll_data *pll = container_of(dsspll, struct hdmi_pll_data, pll);
 	struct hdmi_wp_data *wp = pll->wp;
-	u16 r = 0;
+	int r;
+
+	r = pm_runtime_get_sync(&pll->pdev->dev);
+	WARN_ON(r < 0);
 
 	dss_ctrl_pll_enable(DSS_PLL_HDMI, true);
 
@@ -117,10 +61,14 @@ static void hdmi_pll_disable(struct dss_pll *dsspll)
 {
 	struct hdmi_pll_data *pll = container_of(dsspll, struct hdmi_pll_data, pll);
 	struct hdmi_wp_data *wp = pll->wp;
+	int r;
 
 	hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_ALLOFF);
 
 	dss_ctrl_pll_enable(DSS_PLL_HDMI, false);
+
+	r = pm_runtime_put_sync(&pll->pdev->dev);
+	WARN_ON(r < 0 && r != -ENOSYS);
 }
 
 static const struct dss_pll_ops dsi_pll_ops = {
@@ -130,6 +78,8 @@ static const struct dss_pll_ops dsi_pll_ops = {
 };
 
 static const struct dss_pll_hw dss_omap4_hdmi_pll_hw = {
+	.type = DSS_PLL_TYPE_B,
+
 	.n_max = 255,
 	.m_min = 20,
 	.m_max = 4095,
@@ -153,6 +103,8 @@ static const struct dss_pll_hw dss_omap4_hdmi_pll_hw = {
 };
 
 static const struct dss_pll_hw dss_omap5_hdmi_pll_hw = {
+	.type = DSS_PLL_TYPE_B,
+
 	.n_max = 255,
 	.m_min = 20,
 	.m_max = 2045,
@@ -224,6 +176,7 @@ int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll,
 	int r;
 	struct resource *res;
 
+	pll->pdev = pdev;
 	pll->wp = wp;
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pll");
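The enable/disable ops now bracket PLL register access with runtime PM on the HDMI platform device saved in hdmi_pll_init(). The pairing convention used above, pulled out as a sketch:

	/* sketch: the runtime-PM bracket now wrapping PLL register access */
	static int hdmi_runtime_get(struct hdmi_pll_data *pll)
	{
		int r = pm_runtime_get_sync(&pll->pdev->dev);

		WARN_ON(r < 0);		/* a resume failure is a driver bug here */
		return r < 0 ? r : 0;
	}

	static void hdmi_runtime_put(struct hdmi_pll_data *pll)
	{
		int r = pm_runtime_put_sync(&pll->pdev->dev);

		/* -ENOSYS means runtime PM is compiled out; not an error */
		WARN_ON(r < 0 && r != -ENOSYS);
	}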
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
index 13442b9052d1..203694a52d18 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
@@ -14,8 +14,9 @@
 #include <linux/err.h>
 #include <linux/io.h>
 #include <linux/platform_device.h>
-#include <video/omapdss.h>
+#include <linux/seq_file.h>
 
+#include "omapdss.h"
 #include "dss.h"
 #include "hdmi.h"
 
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index d7e7c909bbc2..6eaf1adbd606 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -18,7 +18,872 @@
 #ifndef __OMAP_DRM_DSS_H
 #define __OMAP_DRM_DSS_H
 
-#include <video/omapdss.h>
+#include <linux/list.h>
+#include <linux/kobject.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <video/videomode.h>
+#include <linux/platform_data/omapdss.h>
+#include <uapi/drm/drm_mode.h>
+
+#define DISPC_IRQ_FRAMEDONE		(1 << 0)
+#define DISPC_IRQ_VSYNC			(1 << 1)
+#define DISPC_IRQ_EVSYNC_EVEN		(1 << 2)
+#define DISPC_IRQ_EVSYNC_ODD		(1 << 3)
+#define DISPC_IRQ_ACBIAS_COUNT_STAT	(1 << 4)
+#define DISPC_IRQ_PROG_LINE_NUM		(1 << 5)
+#define DISPC_IRQ_GFX_FIFO_UNDERFLOW	(1 << 6)
+#define DISPC_IRQ_GFX_END_WIN		(1 << 7)
+#define DISPC_IRQ_PAL_GAMMA_MASK	(1 << 8)
+#define DISPC_IRQ_OCP_ERR		(1 << 9)
+#define DISPC_IRQ_VID1_FIFO_UNDERFLOW	(1 << 10)
+#define DISPC_IRQ_VID1_END_WIN		(1 << 11)
+#define DISPC_IRQ_VID2_FIFO_UNDERFLOW	(1 << 12)
+#define DISPC_IRQ_VID2_END_WIN		(1 << 13)
+#define DISPC_IRQ_SYNC_LOST		(1 << 14)
+#define DISPC_IRQ_SYNC_LOST_DIGIT	(1 << 15)
+#define DISPC_IRQ_WAKEUP		(1 << 16)
+#define DISPC_IRQ_SYNC_LOST2		(1 << 17)
+#define DISPC_IRQ_VSYNC2		(1 << 18)
+#define DISPC_IRQ_VID3_END_WIN		(1 << 19)
+#define DISPC_IRQ_VID3_FIFO_UNDERFLOW	(1 << 20)
+#define DISPC_IRQ_ACBIAS_COUNT_STAT2	(1 << 21)
+#define DISPC_IRQ_FRAMEDONE2		(1 << 22)
+#define DISPC_IRQ_FRAMEDONEWB		(1 << 23)
+#define DISPC_IRQ_FRAMEDONETV		(1 << 24)
+#define DISPC_IRQ_WBBUFFEROVERFLOW	(1 << 25)
+#define DISPC_IRQ_WBUNCOMPLETEERROR	(1 << 26)
+#define DISPC_IRQ_SYNC_LOST3		(1 << 27)
+#define DISPC_IRQ_VSYNC3		(1 << 28)
+#define DISPC_IRQ_ACBIAS_COUNT_STAT3	(1 << 29)
+#define DISPC_IRQ_FRAMEDONE3		(1 << 30)
+
+struct omap_dss_device;
+struct omap_overlay_manager;
+struct dss_lcd_mgr_config;
+struct snd_aes_iec958;
+struct snd_cea_861_aud_if;
+struct hdmi_avi_infoframe;
+
+enum omap_display_type {
+	OMAP_DISPLAY_TYPE_NONE		= 0,
+	OMAP_DISPLAY_TYPE_DPI		= 1 << 0,
+	OMAP_DISPLAY_TYPE_DBI		= 1 << 1,
+	OMAP_DISPLAY_TYPE_SDI		= 1 << 2,
+	OMAP_DISPLAY_TYPE_DSI		= 1 << 3,
+	OMAP_DISPLAY_TYPE_VENC		= 1 << 4,
+	OMAP_DISPLAY_TYPE_HDMI		= 1 << 5,
+	OMAP_DISPLAY_TYPE_DVI		= 1 << 6,
+};
+
+enum omap_plane {
+	OMAP_DSS_GFX	= 0,
+	OMAP_DSS_VIDEO1	= 1,
+	OMAP_DSS_VIDEO2	= 2,
+	OMAP_DSS_VIDEO3	= 3,
+	OMAP_DSS_WB	= 4,
+};
+
+enum omap_channel {
+	OMAP_DSS_CHANNEL_LCD	= 0,
+	OMAP_DSS_CHANNEL_DIGIT	= 1,
+	OMAP_DSS_CHANNEL_LCD2	= 2,
+	OMAP_DSS_CHANNEL_LCD3	= 3,
+	OMAP_DSS_CHANNEL_WB	= 4,
+};
+
+enum omap_color_mode {
+	OMAP_DSS_COLOR_CLUT1	= 1 << 0,	/* BITMAP 1 */
+	OMAP_DSS_COLOR_CLUT2	= 1 << 1,	/* BITMAP 2 */
+	OMAP_DSS_COLOR_CLUT4	= 1 << 2,	/* BITMAP 4 */
+	OMAP_DSS_COLOR_CLUT8	= 1 << 3,	/* BITMAP 8 */
+	OMAP_DSS_COLOR_RGB12U	= 1 << 4,	/* RGB12, 16-bit container */
+	OMAP_DSS_COLOR_ARGB16	= 1 << 5,	/* ARGB16 */
+	OMAP_DSS_COLOR_RGB16	= 1 << 6,	/* RGB16 */
+	OMAP_DSS_COLOR_RGB24U	= 1 << 7,	/* RGB24, 32-bit container */
+	OMAP_DSS_COLOR_RGB24P	= 1 << 8,	/* RGB24, 24-bit container */
+	OMAP_DSS_COLOR_YUV2	= 1 << 9,	/* YUV2 4:2:2 co-sited */
+	OMAP_DSS_COLOR_UYVY	= 1 << 10,	/* UYVY 4:2:2 co-sited */
+	OMAP_DSS_COLOR_ARGB32	= 1 << 11,	/* ARGB32 */
+	OMAP_DSS_COLOR_RGBA32	= 1 << 12,	/* RGBA32 */
+	OMAP_DSS_COLOR_RGBX32	= 1 << 13,	/* RGBx32 */
+	OMAP_DSS_COLOR_NV12	= 1 << 14,	/* NV12 format: YUV 4:2:0 */
+	OMAP_DSS_COLOR_RGBA16	= 1 << 15,	/* RGBA16 - 4444 */
+	OMAP_DSS_COLOR_RGBX16	= 1 << 16,	/* RGBx16 - 4444 */
+	OMAP_DSS_COLOR_ARGB16_1555	= 1 << 17,	/* ARGB16 - 1555 */
+	OMAP_DSS_COLOR_XRGB16_1555	= 1 << 18,	/* xRGB16 - 1555 */
+};
+
+enum omap_dss_load_mode {
+	OMAP_DSS_LOAD_CLUT_AND_FRAME	= 0,
+	OMAP_DSS_LOAD_CLUT_ONLY		= 1,
+	OMAP_DSS_LOAD_FRAME_ONLY	= 2,
+	OMAP_DSS_LOAD_CLUT_ONCE_FRAME	= 3,
+};
+
+enum omap_dss_trans_key_type {
+	OMAP_DSS_COLOR_KEY_GFX_DST = 0,
+	OMAP_DSS_COLOR_KEY_VID_SRC = 1,
+};
+
+enum omap_rfbi_te_mode {
+	OMAP_DSS_RFBI_TE_MODE_1 = 1,
+	OMAP_DSS_RFBI_TE_MODE_2 = 2,
+};
+
+enum omap_dss_signal_level {
+	OMAPDSS_SIG_ACTIVE_LOW,
+	OMAPDSS_SIG_ACTIVE_HIGH,
+};
+
+enum omap_dss_signal_edge {
+	OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+	OMAPDSS_DRIVE_SIG_RISING_EDGE,
+};
+
+enum omap_dss_venc_type {
+	OMAP_DSS_VENC_TYPE_COMPOSITE,
+	OMAP_DSS_VENC_TYPE_SVIDEO,
+};
+
+enum omap_dss_dsi_pixel_format {
+	OMAP_DSS_DSI_FMT_RGB888,
+	OMAP_DSS_DSI_FMT_RGB666,
+	OMAP_DSS_DSI_FMT_RGB666_PACKED,
+	OMAP_DSS_DSI_FMT_RGB565,
+};
+
+enum omap_dss_dsi_mode {
+	OMAP_DSS_DSI_CMD_MODE = 0,
+	OMAP_DSS_DSI_VIDEO_MODE,
+};
+
+enum omap_display_caps {
+	OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE	= 1 << 0,
+	OMAP_DSS_DISPLAY_CAP_TEAR_ELIM		= 1 << 1,
+};
+
+enum omap_dss_display_state {
+	OMAP_DSS_DISPLAY_DISABLED = 0,
+	OMAP_DSS_DISPLAY_ACTIVE,
+};
+
+enum omap_dss_rotation_type {
+	OMAP_DSS_ROT_DMA	= 1 << 0,
+	OMAP_DSS_ROT_VRFB	= 1 << 1,
+	OMAP_DSS_ROT_TILER	= 1 << 2,
+};
+
+/* clockwise rotation angle */
+enum omap_dss_rotation_angle {
+	OMAP_DSS_ROT_0   = 0,
+	OMAP_DSS_ROT_90  = 1,
+	OMAP_DSS_ROT_180 = 2,
+	OMAP_DSS_ROT_270 = 3,
+};
+
+enum omap_overlay_caps {
+	OMAP_DSS_OVL_CAP_SCALE = 1 << 0,
+	OMAP_DSS_OVL_CAP_GLOBAL_ALPHA = 1 << 1,
+	OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA = 1 << 2,
+	OMAP_DSS_OVL_CAP_ZORDER = 1 << 3,
+	OMAP_DSS_OVL_CAP_POS = 1 << 4,
+	OMAP_DSS_OVL_CAP_REPLICATION = 1 << 5,
+};
+
+enum omap_overlay_manager_caps {
+	OMAP_DSS_DUMMY_VALUE, /* add a dummy value to prevent compiler error */
+};
+
+enum omap_dss_clk_source {
+	OMAP_DSS_CLK_SRC_FCK = 0,		/* OMAP2/3: DSS1_ALWON_FCLK
+						 * OMAP4: DSS_FCLK */
+	OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC,	/* OMAP3: DSI1_PLL_FCLK
+						 * OMAP4: PLL1_CLK1 */
+	OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI,	/* OMAP3: DSI2_PLL_FCLK
+						 * OMAP4: PLL1_CLK2 */
+	OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC,	/* OMAP4: PLL2_CLK1 */
+	OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI,	/* OMAP4: PLL2_CLK2 */
+};
+
+enum omap_hdmi_flags {
+	OMAP_HDMI_SDA_SCL_EXTERNAL_PULLUP = 1 << 0,
+};
+
+enum omap_dss_output_id {
+	OMAP_DSS_OUTPUT_DPI	= 1 << 0,
+	OMAP_DSS_OUTPUT_DBI	= 1 << 1,
+	OMAP_DSS_OUTPUT_SDI	= 1 << 2,
+	OMAP_DSS_OUTPUT_DSI1	= 1 << 3,
+	OMAP_DSS_OUTPUT_DSI2	= 1 << 4,
+	OMAP_DSS_OUTPUT_VENC	= 1 << 5,
+	OMAP_DSS_OUTPUT_HDMI	= 1 << 6,
+};
+
+/* RFBI */
+
+struct rfbi_timings {
+	int cs_on_time;
+	int cs_off_time;
+	int we_on_time;
+	int we_off_time;
+	int re_on_time;
+	int re_off_time;
+	int we_cycle_time;
+	int re_cycle_time;
+	int cs_pulse_width;
+	int access_time;
+
+	int clk_div;
+
+	u32 tim[5];		/* set by rfbi_convert_timings() */
+
+	int converted;
+};
+
+/* DSI */
+
+enum omap_dss_dsi_trans_mode {
+	/* Sync Pulses: both sync start and end packets sent */
+	OMAP_DSS_DSI_PULSE_MODE,
+	/* Sync Events: only sync start packets sent */
+	OMAP_DSS_DSI_EVENT_MODE,
+	/* Burst: only sync start packets sent, pixels are time compressed */
+	OMAP_DSS_DSI_BURST_MODE,
+};
+
+struct omap_dss_dsi_videomode_timings {
+	unsigned long hsclk;
+
+	unsigned ndl;
+	unsigned bitspp;
+
+	/* pixels */
+	u16 hact;
+	/* lines */
+	u16 vact;
+
+	/* DSI video mode blanking data */
+	/* Unit: byte clock cycles */
+	u16 hss;
+	u16 hsa;
+	u16 hse;
+	u16 hfp;
+	u16 hbp;
+	/* Unit: line clocks */
+	u16 vsa;
+	u16 vfp;
+	u16 vbp;
+
+	/* DSI blanking modes */
+	int blanking_mode;
+	int hsa_blanking_mode;
+	int hbp_blanking_mode;
+	int hfp_blanking_mode;
+
+	enum omap_dss_dsi_trans_mode trans_mode;
+
+	bool ddr_clk_always_on;
+	int window_sync;
+};
+
+struct omap_dss_dsi_config {
+	enum omap_dss_dsi_mode mode;
+	enum omap_dss_dsi_pixel_format pixel_format;
+	const struct omap_video_timings *timings;
+
+	unsigned long hs_clk_min, hs_clk_max;
+	unsigned long lp_clk_min, lp_clk_max;
+
+	bool ddr_clk_always_on;
+	enum omap_dss_dsi_trans_mode trans_mode;
+};
+
+struct omap_video_timings {
+	/* Unit: pixels */
+	u16 x_res;
+	/* Unit: pixels */
+	u16 y_res;
+	/* Unit: Hz */
+	u32 pixelclock;
+	/* Unit: pixel clocks */
+	u16 hsw;	/* Horizontal synchronization pulse width */
+	/* Unit: pixel clocks */
+	u16 hfp;	/* Horizontal front porch */
+	/* Unit: pixel clocks */
+	u16 hbp;	/* Horizontal back porch */
+	/* Unit: line clocks */
+	u16 vsw;	/* Vertical synchronization pulse width */
+	/* Unit: line clocks */
+	u16 vfp;	/* Vertical front porch */
+	/* Unit: line clocks */
+	u16 vbp;	/* Vertical back porch */
+
+	/* Vsync logic level */
+	enum omap_dss_signal_level vsync_level;
+	/* Hsync logic level */
+	enum omap_dss_signal_level hsync_level;
+	/* Interlaced or Progressive timings */
+	bool interlace;
+	/* Pixel clock edge to drive LCD data */
+	enum omap_dss_signal_edge data_pclk_edge;
+	/* Data enable logic level */
+	enum omap_dss_signal_level de_level;
+	/* Pixel clock edges to drive HSYNC and VSYNC signals */
+	enum omap_dss_signal_edge sync_pclk_edge;
+
+	bool double_pixel;
+};
+
+/* Hardcoded timings for tv modes. Venc only uses these to
+ * identify the mode, and does not actually use the configs
+ * itself. However, the configs should be something that
+ * a normal monitor can also show */
+extern const struct omap_video_timings omap_dss_pal_timings;
+extern const struct omap_video_timings omap_dss_ntsc_timings;
+
+struct omap_dss_cpr_coefs {
+	s16 rr, rg, rb;
+	s16 gr, gg, gb;
+	s16 br, bg, bb;
+};
+
+struct omap_overlay_info {
+	dma_addr_t paddr;
+	dma_addr_t p_uv_addr;  /* for NV12 format */
+	u16 screen_width;
+	u16 width;
+	u16 height;
+	enum omap_color_mode color_mode;
+	u8 rotation;
+	enum omap_dss_rotation_type rotation_type;
+	bool mirror;
+
+	u16 pos_x;
+	u16 pos_y;
+	u16 out_width;	/* if 0, out_width == width */
+	u16 out_height;	/* if 0, out_height == height */
+	u8 global_alpha;
+	u8 pre_mult_alpha;
+	u8 zorder;
+};
+
+struct omap_overlay {
+	struct kobject kobj;
+	struct list_head list;
+
+	/* static fields */
+	const char *name;
+	enum omap_plane id;
+	enum omap_color_mode supported_modes;
+	enum omap_overlay_caps caps;
+
+	/* dynamic fields */
+	struct omap_overlay_manager *manager;
+
+	/*
+	 * The following functions do not block:
+	 *
+	 * is_enabled
+	 * set_overlay_info
+	 * get_overlay_info
+	 *
+	 * The rest of the functions may block and cannot be called from
+	 * interrupt context
+	 */
+
+	int (*enable)(struct omap_overlay *ovl);
+	int (*disable)(struct omap_overlay *ovl);
+	bool (*is_enabled)(struct omap_overlay *ovl);
+
+	int (*set_manager)(struct omap_overlay *ovl,
+		struct omap_overlay_manager *mgr);
+	int (*unset_manager)(struct omap_overlay *ovl);
+
+	int (*set_overlay_info)(struct omap_overlay *ovl,
+			struct omap_overlay_info *info);
+	void (*get_overlay_info)(struct omap_overlay *ovl,
+			struct omap_overlay_info *info);
+
+	int (*wait_for_go)(struct omap_overlay *ovl);
+
+	struct omap_dss_device *(*get_device)(struct omap_overlay *ovl);
+};
+
+struct omap_overlay_manager_info {
+	u32 default_color;
+
+	enum omap_dss_trans_key_type trans_key_type;
+	u32 trans_key;
+	bool trans_enabled;
+
+	bool partial_alpha_enabled;
+
+	bool cpr_enable;
+	struct omap_dss_cpr_coefs cpr_coefs;
+};
+
+struct omap_overlay_manager {
+	struct kobject kobj;
+
+	/* static fields */
+	const char *name;
+	enum omap_channel id;
+	enum omap_overlay_manager_caps caps;
+	struct list_head overlays;
+	enum omap_display_type supported_displays;
+	enum omap_dss_output_id supported_outputs;
+
+	/* dynamic fields */
+	struct omap_dss_device *output;
+
+	/*
+	 * The following functions do not block:
+	 *
+	 * set_manager_info
+	 * get_manager_info
+	 * apply
+	 *
+	 * The rest of the functions may block and cannot be called from
+	 * interrupt context
+	 */
+
+	int (*set_output)(struct omap_overlay_manager *mgr,
+		struct omap_dss_device *output);
+	int (*unset_output)(struct omap_overlay_manager *mgr);
+
+	int (*set_manager_info)(struct omap_overlay_manager *mgr,
+			struct omap_overlay_manager_info *info);
+	void (*get_manager_info)(struct omap_overlay_manager *mgr,
+			struct omap_overlay_manager_info *info);
+
+	int (*apply)(struct omap_overlay_manager *mgr);
+	int (*wait_for_go)(struct omap_overlay_manager *mgr);
+	int (*wait_for_vsync)(struct omap_overlay_manager *mgr);
+
+	struct omap_dss_device *(*get_device)(struct omap_overlay_manager *mgr);
+};
+
+/* 22 pins means 1 clk lane and 10 data lanes */
+#define OMAP_DSS_MAX_DSI_PINS 22
+
+struct omap_dsi_pin_config {
+	int num_pins;
+	/*
+	 * pin numbers in the following order:
+	 * clk+, clk-
+	 * data1+, data1-
+	 * data2+, data2-
+	 * ...
+	 */
+	int pins[OMAP_DSS_MAX_DSI_PINS];
+};
+
+struct omap_dss_writeback_info {
+	u32 paddr;
+	u32 p_uv_addr;
+	u16 buf_width;
+	u16 width;
+	u16 height;
+	enum omap_color_mode color_mode;
+	u8 rotation;
+	enum omap_dss_rotation_type rotation_type;
+	bool mirror;
+	u8 pre_mult_alpha;
+};
+
+struct omapdss_dpi_ops {
+	int (*connect)(struct omap_dss_device *dssdev,
+			struct omap_dss_device *dst);
+	void (*disconnect)(struct omap_dss_device *dssdev,
+			struct omap_dss_device *dst);
+
+	int (*enable)(struct omap_dss_device *dssdev);
+	void (*disable)(struct omap_dss_device *dssdev);
+
+	int (*check_timings)(struct omap_dss_device *dssdev,
+			struct omap_video_timings *timings);
+	void (*set_timings)(struct omap_dss_device *dssdev,
+			struct omap_video_timings *timings);
+	void (*get_timings)(struct omap_dss_device *dssdev,
+			struct omap_video_timings *timings);
+
+	void (*set_data_lines)(struct omap_dss_device *dssdev, int data_lines);
+};
+
+struct omapdss_sdi_ops {
+	int (*connect)(struct omap_dss_device *dssdev,
+			struct omap_dss_device *dst);
+	void (*disconnect)(struct omap_dss_device *dssdev,
+			struct omap_dss_device *dst);
+
+	int (*enable)(struct omap_dss_device *dssdev);
+	void (*disable)(struct omap_dss_device *dssdev);
+
+	int (*check_timings)(struct omap_dss_device *dssdev,
+			struct omap_video_timings *timings);
+	void (*set_timings)(struct omap_dss_device *dssdev,
+			struct omap_video_timings *timings);
+	void (*get_timings)(struct omap_dss_device *dssdev,
+			struct omap_video_timings *timings);
+
+	void (*set_datapairs)(struct omap_dss_device *dssdev, int datapairs);
+};
+
+struct omapdss_dvi_ops {
+	int (*connect)(struct omap_dss_device *dssdev,
+			struct omap_dss_device *dst);
+	void (*disconnect)(struct omap_dss_device *dssdev,
+			struct omap_dss_device *dst);
+
+	int (*enable)(struct omap_dss_device *dssdev);
+	void (*disable)(struct omap_dss_device *dssdev);
+
+	int (*check_timings)(struct omap_dss_device *dssdev,
+			struct omap_video_timings *timings);
+	void (*set_timings)(struct omap_dss_device *dssdev,
+			struct omap_video_timings *timings);
+	void (*get_timings)(struct omap_dss_device *dssdev,
+			struct omap_video_timings *timings);
+};
+
+struct omapdss_atv_ops {
+	int (*connect)(struct omap_dss_device *dssdev,
+			struct omap_dss_device *dst);
+	void (*disconnect)(struct omap_dss_device *dssdev,
+			struct omap_dss_device *dst);
+
+	int (*enable)(struct omap_dss_device *dssdev);
+	void (*disable)(struct omap_dss_device *dssdev);
+
+	int (*check_timings)(struct omap_dss_device *dssdev,
+			struct omap_video_timings *timings);
+	void (*set_timings)(struct omap_dss_device *dssdev,
+			struct omap_video_timings *timings);
+	void (*get_timings)(struct omap_dss_device *dssdev,
+			struct omap_video_timings *timings);
+
+	void (*set_type)(struct omap_dss_device *dssdev,
+		enum omap_dss_venc_type type);
+	void (*invert_vid_out_polarity)(struct omap_dss_device *dssdev,
+		bool invert_polarity);
+
+	int (*set_wss)(struct omap_dss_device *dssdev, u32 wss);
+	u32 (*get_wss)(struct omap_dss_device *dssdev);
+};
+
+struct omapdss_hdmi_ops {
+	int (*connect)(struct omap_dss_device *dssdev,
+			struct omap_dss_device *dst);
+	void (*disconnect)(struct omap_dss_device *dssdev,
+			struct omap_dss_device *dst);
+
+	int (*enable)(struct omap_dss_device *dssdev);
+	void (*disable)(struct omap_dss_device *dssdev);
+
+	int (*check_timings)(struct omap_dss_device *dssdev,
+			struct omap_video_timings *timings);
+	void (*set_timings)(struct omap_dss_device *dssdev,
+			struct omap_video_timings *timings);
+	void (*get_timings)(struct omap_dss_device *dssdev,
+			struct omap_video_timings *timings);
+
+	int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
+	bool (*detect)(struct omap_dss_device *dssdev);
+
+	int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode);
+	int (*set_infoframe)(struct omap_dss_device *dssdev,
+		const struct hdmi_avi_infoframe *avi);
+};
+
+struct omapdss_dsi_ops {
+	int (*connect)(struct omap_dss_device *dssdev,
+			struct omap_dss_device *dst);
+	void (*disconnect)(struct omap_dss_device *dssdev,
+			struct omap_dss_device *dst);
+
+	int (*enable)(struct omap_dss_device *dssdev);
+	void (*disable)(struct omap_dss_device *dssdev, bool disconnect_lanes,
+			bool enter_ulps);
+
+	/* bus configuration */
+	int (*set_config)(struct omap_dss_device *dssdev,
+			const struct omap_dss_dsi_config *cfg);
+	int (*configure_pins)(struct omap_dss_device *dssdev,
+			const struct omap_dsi_pin_config *pin_cfg);
+
+	void (*enable_hs)(struct omap_dss_device *dssdev, int channel,
+			bool enable);
+	int (*enable_te)(struct omap_dss_device *dssdev, bool enable);
+
+	int (*update)(struct omap_dss_device *dssdev, int channel,
+			void (*callback)(int, void *), void *data);
+
+	void (*bus_lock)(struct omap_dss_device *dssdev);
+	void (*bus_unlock)(struct omap_dss_device *dssdev);
+
+	int (*enable_video_output)(struct omap_dss_device *dssdev, int channel);
+	void (*disable_video_output)(struct omap_dss_device *dssdev,
+			int channel);
+
+	int (*request_vc)(struct omap_dss_device *dssdev, int *channel);
+	int (*set_vc_id)(struct omap_dss_device *dssdev, int channel,
+			int vc_id);
+	void (*release_vc)(struct omap_dss_device *dssdev, int channel);
+
+	/* data transfer */
+	int (*dcs_write)(struct omap_dss_device *dssdev, int channel,
+			u8 *data, int len);
+	int (*dcs_write_nosync)(struct omap_dss_device *dssdev, int channel,
+			u8 *data, int len);
+	int (*dcs_read)(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
+			u8 *data, int len);
+
+	int (*gen_write)(struct omap_dss_device *dssdev, int channel,
+			u8 *data, int len);
+	int (*gen_write_nosync)(struct omap_dss_device *dssdev, int channel,
+			u8 *data, int len);
+	int (*gen_read)(struct omap_dss_device *dssdev, int channel,
+			u8 *reqdata, int reqlen,
+			u8 *data, int len);
+
+	int (*bta_sync)(struct omap_dss_device *dssdev, int channel);
+
+	int (*set_max_rx_packet_size)(struct omap_dss_device *dssdev,
+			int channel, u16 plen);
+};
+
+struct omap_dss_device {
+	struct kobject kobj;
+	struct device *dev;
+
+	struct module *owner;
+
+	struct list_head panel_list;
+
+	/* alias in the form of "display%d" */
+	char alias[16];
+
+	enum omap_display_type type;
+	enum omap_display_type output_type;
+
+	union {
+		struct {
+			u8 data_lines;
+		} dpi;
+
+		struct {
+			u8 channel;
+			u8 data_lines;
+		} rfbi;
+
+		struct {
+			u8 datapairs;
+		} sdi;
+
+		struct {
+			int module;
+		} dsi;
+
+		struct {
+			enum omap_dss_venc_type type;
+			bool invert_polarity;
+		} venc;
+	} phy;
+
+	struct {
+		struct omap_video_timings timings;
+
+		enum omap_dss_dsi_pixel_format dsi_pix_fmt;
+		enum omap_dss_dsi_mode dsi_mode;
+	} panel;
+
+	struct {
+		u8 pixel_size;
+		struct rfbi_timings rfbi_timings;
+	} ctrl;
+
+	const char *name;
+
+	/* used to match device to driver */
+	const char *driver_name;
+
+	void *data;
+
+	struct omap_dss_driver *driver;
+
+	union {
+		const struct omapdss_dpi_ops *dpi;
+		const struct omapdss_sdi_ops *sdi;
+		const struct omapdss_dvi_ops *dvi;
+		const struct omapdss_hdmi_ops *hdmi;
+		const struct omapdss_atv_ops *atv;
+		const struct omapdss_dsi_ops *dsi;
+	} ops;
+
+	/* helper variable for driver suspend/resume */
+	bool activate_after_resume;
+
+	enum omap_display_caps caps;
+
+	struct omap_dss_device *src;
+
+	enum omap_dss_display_state state;
+
+	/* OMAP DSS output specific fields */
+
+	struct list_head list;
+
+	/* DISPC channel for this output */
+	enum omap_channel dispc_channel;
+	bool dispc_channel_connected;
+
+	/* output instance */
+	enum omap_dss_output_id id;
+
+	/* the port number in the DT node */
+	int port_num;
+
+	/* dynamic fields */
+	struct omap_overlay_manager *manager;
+
+	struct omap_dss_device *dst;
+};
+
+struct omap_dss_driver {
+	int (*probe)(struct omap_dss_device *);
+	void (*remove)(struct omap_dss_device *);
+
+	int (*connect)(struct omap_dss_device *dssdev);
+	void (*disconnect)(struct omap_dss_device *dssdev);
+
+	int (*enable)(struct omap_dss_device *display);
+	void (*disable)(struct omap_dss_device *display);
+	int (*run_test)(struct omap_dss_device *display, int test);
+
+	int (*update)(struct omap_dss_device *dssdev,
+			u16 x, u16 y, u16 w, u16 h);
+	int (*sync)(struct omap_dss_device *dssdev);
+
+	int (*enable_te)(struct omap_dss_device *dssdev, bool enable);
+	int (*get_te)(struct omap_dss_device *dssdev);
+
+	u8 (*get_rotate)(struct omap_dss_device *dssdev);
+	int (*set_rotate)(struct omap_dss_device *dssdev, u8 rotate);
+
+	bool (*get_mirror)(struct omap_dss_device *dssdev);
+	int (*set_mirror)(struct omap_dss_device *dssdev, bool enable);
+
+	int (*memory_read)(struct omap_dss_device *dssdev,
+			void *buf, size_t size,
+			u16 x, u16 y, u16 w, u16 h);
+
+	void (*get_resolution)(struct omap_dss_device *dssdev,
+			u16 *xres, u16 *yres);
+	void (*get_dimensions)(struct omap_dss_device *dssdev,
+			u32 *width, u32 *height);
+	int (*get_recommended_bpp)(struct omap_dss_device *dssdev);
+
+	int (*check_timings)(struct omap_dss_device *dssdev,
+			struct omap_video_timings *timings);
+	void (*set_timings)(struct omap_dss_device *dssdev,
+			struct omap_video_timings *timings);
+	void (*get_timings)(struct omap_dss_device *dssdev,
+			struct omap_video_timings *timings);
+
+	int (*set_wss)(struct omap_dss_device *dssdev, u32 wss);
+	u32 (*get_wss)(struct omap_dss_device *dssdev);
+
+	int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
+	bool (*detect)(struct omap_dss_device *dssdev);
+
+	int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode);
+	int (*set_hdmi_infoframe)(struct omap_dss_device *dssdev,
+			const struct hdmi_avi_infoframe *avi);
+};
+
+enum omapdss_version omapdss_get_version(void);
+bool omapdss_is_initialized(void);
+
+int omap_dss_register_driver(struct omap_dss_driver *);
+void omap_dss_unregister_driver(struct omap_dss_driver *);
+
+int omapdss_register_display(struct omap_dss_device *dssdev);
+void omapdss_unregister_display(struct omap_dss_device *dssdev);
+
+struct omap_dss_device *omap_dss_get_device(struct omap_dss_device *dssdev);
+void omap_dss_put_device(struct omap_dss_device *dssdev);
+#define for_each_dss_dev(d) while ((d = omap_dss_get_next_device(d)) != NULL)
+struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from);
+struct omap_dss_device *omap_dss_find_device(void *data,
+		int (*match)(struct omap_dss_device *dssdev, void *data));
+const char *omapdss_get_default_display_name(void);
+
+void videomode_to_omap_video_timings(const struct videomode *vm,
+		struct omap_video_timings *ovt);
+void omap_video_timings_to_videomode(const struct omap_video_timings *ovt,
+		struct videomode *vm);
+
+int dss_feat_get_num_mgrs(void);
+int dss_feat_get_num_ovls(void);
+enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane);
+
+
+
+int omap_dss_get_num_overlay_managers(void);
+struct omap_overlay_manager *omap_dss_get_overlay_manager(int num);
+
+int omap_dss_get_num_overlays(void);
+struct omap_overlay *omap_dss_get_overlay(int num);
+
+int omapdss_register_output(struct omap_dss_device *output);
+void omapdss_unregister_output(struct omap_dss_device *output);
+struct omap_dss_device *omap_dss_get_output(enum omap_dss_output_id id);
+struct omap_dss_device *omap_dss_find_output(const char *name);
+struct omap_dss_device *omap_dss_find_output_by_port_node(struct device_node *port);
+int omapdss_output_set_device(struct omap_dss_device *out,
+		struct omap_dss_device *dssdev);
+int omapdss_output_unset_device(struct omap_dss_device *out);
+
+struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device *dssdev);
+struct omap_overlay_manager *omapdss_find_mgr_from_display(struct omap_dss_device *dssdev);
+
+void omapdss_default_get_resolution(struct omap_dss_device *dssdev,
+		u16 *xres, u16 *yres);
+int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev);
+void omapdss_default_get_timings(struct omap_dss_device *dssdev,
+		struct omap_video_timings *timings);
+
+typedef void (*omap_dispc_isr_t) (void *arg, u32 mask);
+int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
+int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
+
+int omapdss_compat_init(void);
+void omapdss_compat_uninit(void);
+
+static inline bool omapdss_device_is_connected(struct omap_dss_device *dssdev)
+{
+	return dssdev->src;
+}
+
+static inline bool omapdss_device_is_enabled(struct omap_dss_device *dssdev)
+{
+	return dssdev->state == OMAP_DSS_DISPLAY_ACTIVE;
+}
+
+struct device_node *
+omapdss_of_get_next_port(const struct device_node *parent,
+			 struct device_node *prev);
+
+struct device_node *
+omapdss_of_get_next_endpoint(const struct device_node *parent,
+			     struct device_node *prev);
+
+struct device_node *
+omapdss_of_get_first_endpoint(const struct device_node *parent);
+
+struct omap_dss_device *
+omapdss_of_find_source_for_first_ep(struct device_node *node);
 
 u32 dispc_read_irqstatus(void);
 void dispc_clear_irqstatus(u32 mask);
@@ -44,6 +909,10 @@ void dispc_mgr_set_timings(enum omap_channel channel,
 		const struct omap_video_timings *timings);
 void dispc_mgr_setup(enum omap_channel channel,
 		const struct omap_overlay_manager_info *info);
+u32 dispc_mgr_gamma_size(enum omap_channel channel);
+void dispc_mgr_set_gamma(enum omap_channel channel,
+			 const struct drm_color_lut *lut,
+			 unsigned int length);
 
 int dispc_ovl_enable(enum omap_plane plane, bool enable);
 bool dispc_ovl_enabled(enum omap_plane plane);
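The new manager gamma entry points take the DRM uapi LUT format directly, so omapdrm's CRTC gamma hook can pass its blob through without conversion; this also explains the dispc_enable_gamma_table(0) calls removed from the HDMI paths above. A sketch of a caller, assuming a 256-entry table on the LCD manager:

	/* sketch: program a linear gamma ramp (256 entries assumed) */
	static void example_load_linear_gamma(void)
	{
		static struct drm_color_lut lut[256];
		unsigned int i;

		for (i = 0; i < 256; i++) {
			u16 v = (i * 0xffff) / 255;	/* 16 bits per component */
			lut[i].red = lut[i].green = lut[i].blue = v;
		}

		if (dispc_mgr_gamma_size(OMAP_DSS_CHANNEL_LCD) == 256)
			dispc_mgr_set_gamma(OMAP_DSS_CHANNEL_LCD, lut, 256);
	}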
diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
index 829232ad8c81..24f859488201 100644
--- a/drivers/gpu/drm/omapdrm/dss/output.c
+++ b/drivers/gpu/drm/omapdrm/dss/output.c
@@ -21,8 +21,7 @@
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/of.h> 22#include <linux/of.h>
23 23
24#include <video/omapdss.h> 24#include "omapdss.h"
25
26#include "dss.h" 25#include "dss.h"
27 26
28static LIST_HEAD(output_list); 27static LIST_HEAD(output_list);
diff --git a/drivers/gpu/drm/omapdrm/dss/pll.c b/drivers/gpu/drm/omapdrm/dss/pll.c
index f974ddcd3b6e..0a76c89cdc2e 100644
--- a/drivers/gpu/drm/omapdrm/dss/pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/pll.c
@@ -22,8 +22,7 @@
22#include <linux/regulator/consumer.h> 22#include <linux/regulator/consumer.h>
23#include <linux/sched.h> 23#include <linux/sched.h>
24 24
25#include <video/omapdss.h> 25#include "omapdss.h"
26
27#include "dss.h" 26#include "dss.h"
28 27
29#define PLL_CONTROL 0x0000 28#define PLL_CONTROL 0x0000
@@ -76,6 +75,59 @@ struct dss_pll *dss_pll_find(const char *name)
76 return NULL; 75 return NULL;
77} 76}
78 77
78struct dss_pll *dss_pll_find_by_src(enum dss_clk_source src)
79{
80 struct dss_pll *pll;
81
82 switch (src) {
83 default:
84 case DSS_CLK_SRC_FCK:
85 return NULL;
86
87 case DSS_CLK_SRC_HDMI_PLL:
88 return dss_pll_find("hdmi");
89
90 case DSS_CLK_SRC_PLL1_1:
91 case DSS_CLK_SRC_PLL1_2:
92 case DSS_CLK_SRC_PLL1_3:
93 pll = dss_pll_find("dsi0");
94 if (!pll)
95 pll = dss_pll_find("video0");
96 return pll;
97
98 case DSS_CLK_SRC_PLL2_1:
99 case DSS_CLK_SRC_PLL2_2:
100 case DSS_CLK_SRC_PLL2_3:
101 pll = dss_pll_find("dsi1");
102 if (!pll)
103 pll = dss_pll_find("video1");
104 return pll;
105 }
106}
107
108unsigned dss_pll_get_clkout_idx_for_src(enum dss_clk_source src)
109{
110 switch (src) {
111 case DSS_CLK_SRC_HDMI_PLL:
112 return 0;
113
114 case DSS_CLK_SRC_PLL1_1:
115 case DSS_CLK_SRC_PLL2_1:
116 return 0;
117
118 case DSS_CLK_SRC_PLL1_2:
119 case DSS_CLK_SRC_PLL2_2:
120 return 1;
121
122 case DSS_CLK_SRC_PLL1_3:
123 case DSS_CLK_SRC_PLL2_3:
124 return 2;
125
126 default:
127 return 0;
128 }
129}
130
79int dss_pll_enable(struct dss_pll *pll) 131int dss_pll_enable(struct dss_pll *pll)
80{ 132{
81 int r; 133 int r;
@@ -129,7 +181,7 @@ int dss_pll_set_config(struct dss_pll *pll, const struct dss_pll_clock_info *cin
129 return 0; 181 return 0;
130} 182}
131 183
132bool dss_pll_hsdiv_calc(const struct dss_pll *pll, unsigned long clkdco, 184bool dss_pll_hsdiv_calc_a(const struct dss_pll *pll, unsigned long clkdco,
133 unsigned long out_min, unsigned long out_max, 185 unsigned long out_min, unsigned long out_max,
134 dss_hsdiv_calc_func func, void *data) 186 dss_hsdiv_calc_func func, void *data)
135{ 187{
@@ -154,7 +206,11 @@ bool dss_pll_hsdiv_calc(const struct dss_pll *pll, unsigned long clkdco,
154 return false; 206 return false;
155} 207}
156 208
157bool dss_pll_calc(const struct dss_pll *pll, unsigned long clkin, 209/*
210 * clkdco = clkin / n * m * 2
211 * clkoutX = clkdco / mX
212 */
213bool dss_pll_calc_a(const struct dss_pll *pll, unsigned long clkin,
158 unsigned long pll_min, unsigned long pll_max, 214 unsigned long pll_min, unsigned long pll_max,
159 dss_pll_calc_func func, void *data) 215 dss_pll_calc_func func, void *data)
160{ 216{
@@ -195,6 +251,71 @@ bool dss_pll_calc(const struct dss_pll *pll, unsigned long clkin,
195 return false; 251 return false;
196} 252}
197 253
254/*
255 * This calculates a PLL config that will provide the target_clkout rate
 256 * for clkout. Additionally, the clkdco rate will be the same as the
 257 * clkout rate when the clkout rate is >= min_clkdco.
258 *
259 * clkdco = clkin / n * m + clkin / n * mf / 262144
260 * clkout = clkdco / m2
261 */
262bool dss_pll_calc_b(const struct dss_pll *pll, unsigned long clkin,
263 unsigned long target_clkout, struct dss_pll_clock_info *cinfo)
264{
265 unsigned long fint, clkdco, clkout;
266 unsigned long target_clkdco;
267 unsigned long min_dco;
268 unsigned n, m, mf, m2, sd;
269 const struct dss_pll_hw *hw = pll->hw;
270
271 DSSDBG("clkin %lu, target clkout %lu\n", clkin, target_clkout);
272
273 /* Fint */
274 n = DIV_ROUND_UP(clkin, hw->fint_max);
275 fint = clkin / n;
276
277 /* adjust m2 so that the clkdco will be high enough */
278 min_dco = roundup(hw->clkdco_min, fint);
279 m2 = DIV_ROUND_UP(min_dco, target_clkout);
280 if (m2 == 0)
281 m2 = 1;
282
283 target_clkdco = target_clkout * m2;
284 m = target_clkdco / fint;
285
286 clkdco = fint * m;
287
288 /* adjust clkdco with fractional mf */
289 if (WARN_ON(target_clkdco - clkdco > fint))
290 mf = 0;
291 else
292 mf = (u32)div_u64(262144ull * (target_clkdco - clkdco), fint);
293
294 if (mf > 0)
295 clkdco += (u32)div_u64((u64)mf * fint, 262144);
296
297 clkout = clkdco / m2;
298
299 /* sigma-delta */
300 sd = DIV_ROUND_UP(fint * m, 250000000);
301
302 DSSDBG("N = %u, M = %u, M.f = %u, M2 = %u, SD = %u\n",
303 n, m, mf, m2, sd);
304 DSSDBG("Fint %lu, clkdco %lu, clkout %lu\n", fint, clkdco, clkout);
305
306 cinfo->n = n;
307 cinfo->m = m;
308 cinfo->mf = mf;
309 cinfo->mX[0] = m2;
310 cinfo->sd = sd;
311
312 cinfo->fint = fint;
313 cinfo->clkdco = clkdco;
314 cinfo->clkout[0] = clkout;
315
316 return true;
317}
318
198static int wait_for_bit_change(void __iomem *reg, int bitnum, int value) 319static int wait_for_bit_change(void __iomem *reg, int bitnum, int value)
199{ 320{
200 unsigned long timeout; 321 unsigned long timeout;
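The type B formula above is easy to sanity-check outside the kernel. Below is a minimal standalone sketch of the same arithmetic; the 20 MHz reference and 148.5 MHz target are illustrative assumptions, not values taken from this patch.

/*
 * Standalone sanity check of the dss_pll_calc_b() arithmetic.
 * FINT_MAX and CLKDCO_MIN stand in for hw->fint_max / hw->clkdco_min.
 */
#include <stdio.h>
#include <stdint.h>

#define FINT_MAX	2500000UL	/* assumed hw->fint_max */
#define CLKDCO_MIN	500000000UL	/* assumed hw->clkdco_min */

int main(void)
{
	unsigned long clkin = 20000000UL;		/* 20 MHz reference */
	unsigned long target_clkout = 148500000UL;	/* 1080p pixel clock */
	unsigned long n, fint, min_dco, m2, target_clkdco, m, mf, clkdco;

	n = (clkin + FINT_MAX - 1) / FINT_MAX;		/* DIV_ROUND_UP */
	fint = clkin / n;

	min_dco = ((CLKDCO_MIN + fint - 1) / fint) * fint;	/* roundup */
	m2 = (min_dco + target_clkout - 1) / target_clkout;
	if (m2 == 0)
		m2 = 1;

	target_clkdco = target_clkout * m2;
	m = target_clkdco / fint;
	clkdco = fint * m;

	/* fractional part: mf is in units of fint / 2^18 */
	mf = (unsigned long)(262144ULL * (target_clkdco - clkdco) / fint);
	if (mf > 0)
		clkdco += (unsigned long)((uint64_t)mf * fint / 262144);

	/* prints n=8 fint=2500000 m=237 mf=157286 m2=4 clkout=148499999 */
	printf("n=%lu fint=%lu m=%lu mf=%lu m2=%lu clkdco=%lu clkout=%lu\n",
	       n, fint, m, mf, m2, clkdco, clkdco / m2);
	return 0;
}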
diff --git a/drivers/gpu/drm/omapdrm/dss/rfbi.c b/drivers/gpu/drm/omapdrm/dss/rfbi.c
index 3796576dfadf..cd53566d75eb 100644
--- a/drivers/gpu/drm/omapdrm/dss/rfbi.c
+++ b/drivers/gpu/drm/omapdrm/dss/rfbi.c
@@ -38,7 +38,7 @@
38#include <linux/pm_runtime.h> 38#include <linux/pm_runtime.h>
39#include <linux/component.h> 39#include <linux/component.h>
40 40
41#include <video/omapdss.h> 41#include "omapdss.h"
42#include "dss.h" 42#include "dss.h"
43 43
44struct rfbi_reg { u16 idx; }; 44struct rfbi_reg { u16 idx; };
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index cd6d3bfb041d..0a96c321ce62 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -29,7 +29,7 @@
29#include <linux/of.h> 29#include <linux/of.h>
30#include <linux/component.h> 30#include <linux/component.h>
31 31
32#include <video/omapdss.h> 32#include "omapdss.h"
33#include "dss.h" 33#include "dss.h"
34 34
35static struct { 35static struct {
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index 08a2cc778ba9..6eedf2118708 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -37,8 +37,7 @@
37#include <linux/of.h> 37#include <linux/of.h>
38#include <linux/component.h> 38#include <linux/component.h>
39 39
40#include <video/omapdss.h> 40#include "omapdss.h"
41
42#include "dss.h" 41#include "dss.h"
43#include "dss_features.h" 42#include "dss_features.h"
44 43
diff --git a/drivers/gpu/drm/omapdrm/dss/video-pll.c b/drivers/gpu/drm/omapdrm/dss/video-pll.c
index b1ec59e42940..7429de928d4e 100644
--- a/drivers/gpu/drm/omapdrm/dss/video-pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/video-pll.c
@@ -17,8 +17,7 @@
17#include <linux/platform_device.h> 17#include <linux/platform_device.h>
18#include <linux/sched.h> 18#include <linux/sched.h>
19 19
20#include <video/omapdss.h> 20#include "omapdss.h"
21
22#include "dss.h" 21#include "dss.h"
23#include "dss_features.h" 22#include "dss_features.h"
24 23
@@ -108,6 +107,8 @@ static const struct dss_pll_ops dss_pll_ops = {
108}; 107};
109 108
110static const struct dss_pll_hw dss_dra7_video_pll_hw = { 109static const struct dss_pll_hw dss_dra7_video_pll_hw = {
110 .type = DSS_PLL_TYPE_A,
111
111 .n_max = (1 << 8) - 1, 112 .n_max = (1 << 8) - 1,
112 .m_max = (1 << 12) - 1, 113 .m_max = (1 << 12) - 1,
113 .mX_max = (1 << 5) - 1, 114 .mX_max = (1 << 5) - 1,
@@ -124,6 +125,10 @@ static const struct dss_pll_hw dss_dra7_video_pll_hw = {
124 .mX_lsb[0] = 21, 125 .mX_lsb[0] = 21,
125 .mX_msb[1] = 30, 126 .mX_msb[1] = 30,
126 .mX_lsb[1] = 26, 127 .mX_lsb[1] = 26,
128 .mX_msb[2] = 4,
129 .mX_lsb[2] = 0,
130 .mX_msb[3] = 9,
131 .mX_lsb[3] = 5,
127 132
128 .has_refsel = true, 133 .has_refsel = true,
129}; 134};
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index ce2d67b6a8c7..137fe690a0da 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -32,7 +32,6 @@
32struct omap_connector { 32struct omap_connector {
33 struct drm_connector base; 33 struct drm_connector base;
34 struct omap_dss_device *dssdev; 34 struct omap_dss_device *dssdev;
35 struct drm_encoder *encoder;
36 bool hdmi_mode; 35 bool hdmi_mode;
37}; 36};
38 37
@@ -256,13 +255,6 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
256 return ret; 255 return ret;
257} 256}
258 257
259struct drm_encoder *omap_connector_attached_encoder(
260 struct drm_connector *connector)
261{
262 struct omap_connector *omap_connector = to_omap_connector(connector);
263 return omap_connector->encoder;
264}
265
266static const struct drm_connector_funcs omap_connector_funcs = { 258static const struct drm_connector_funcs omap_connector_funcs = {
267 .dpms = drm_atomic_helper_connector_dpms, 259 .dpms = drm_atomic_helper_connector_dpms,
268 .reset = drm_atomic_helper_connector_reset, 260 .reset = drm_atomic_helper_connector_reset,
@@ -276,7 +268,6 @@ static const struct drm_connector_funcs omap_connector_funcs = {
276static const struct drm_connector_helper_funcs omap_connector_helper_funcs = { 268static const struct drm_connector_helper_funcs omap_connector_helper_funcs = {
277 .get_modes = omap_connector_get_modes, 269 .get_modes = omap_connector_get_modes,
278 .mode_valid = omap_connector_mode_valid, 270 .mode_valid = omap_connector_mode_valid,
279 .best_encoder = omap_connector_attached_encoder,
280}; 271};
281 272
282/* initialize connector */ 273/* initialize connector */
@@ -296,7 +287,6 @@ struct drm_connector *omap_connector_init(struct drm_device *dev,
296 goto fail; 287 goto fail;
297 288
298 omap_connector->dssdev = dssdev; 289 omap_connector->dssdev = dssdev;
299 omap_connector->encoder = encoder;
300 290
301 connector = &omap_connector->base; 291 connector = &omap_connector->base;
302 292
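Dropping .best_encoder here (and in the rcar-du and rockchip hunks further down) leans on the atomic helper's fallback: when the hook is left NULL, the core resolves the connector's single attached encoder itself. A paraphrased sketch of the 4.7-era connector-routing step (treat the exact shape as an assumption):

	/* Paraphrased core fallback: with .best_encoder == NULL, atomic
	 * drivers with one encoder per connector get the right answer
	 * from drm_atomic_helper_best_encoder() automatically.
	 */
	if (funcs->best_encoder)
		new_encoder = funcs->best_encoder(connector);
	else
		new_encoder = drm_atomic_helper_best_encoder(connector);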
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 075f2bb44867..180f644e861e 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -372,6 +372,20 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc)
372 copy_timings_drm_to_omap(&omap_crtc->timings, mode); 372 copy_timings_drm_to_omap(&omap_crtc->timings, mode);
373} 373}
374 374
375static int omap_crtc_atomic_check(struct drm_crtc *crtc,
376 struct drm_crtc_state *state)
377{
378 if (state->color_mgmt_changed && state->gamma_lut) {
379 uint length = state->gamma_lut->length /
380 sizeof(struct drm_color_lut);
381
382 if (length < 2)
383 return -EINVAL;
384 }
385
386 return 0;
387}
388
375static void omap_crtc_atomic_begin(struct drm_crtc *crtc, 389static void omap_crtc_atomic_begin(struct drm_crtc *crtc,
376 struct drm_crtc_state *old_crtc_state) 390 struct drm_crtc_state *old_crtc_state)
377{ 391{
@@ -384,6 +398,32 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
384 398
385 WARN_ON(omap_crtc->vblank_irq.registered); 399 WARN_ON(omap_crtc->vblank_irq.registered);
386 400
401 if (crtc->state->color_mgmt_changed) {
402 struct drm_color_lut *lut = NULL;
403 uint length = 0;
404
405 if (crtc->state->gamma_lut) {
406 lut = (struct drm_color_lut *)
407 crtc->state->gamma_lut->data;
408 length = crtc->state->gamma_lut->length /
409 sizeof(*lut);
410 }
411 dispc_mgr_set_gamma(omap_crtc->channel, lut, length);
412 }
413
387 if (dispc_mgr_is_enabled(omap_crtc->channel)) { 427 if (dispc_mgr_is_enabled(omap_crtc->channel)) {
388 428
389 DBG("%s: GO", omap_crtc->name); 429 DBG("%s: GO", omap_crtc->name);
@@ -460,6 +500,7 @@ static const struct drm_crtc_funcs omap_crtc_funcs = {
460 .set_config = drm_atomic_helper_set_config, 500 .set_config = drm_atomic_helper_set_config,
461 .destroy = omap_crtc_destroy, 501 .destroy = omap_crtc_destroy,
462 .page_flip = drm_atomic_helper_page_flip, 502 .page_flip = drm_atomic_helper_page_flip,
503 .gamma_set = drm_atomic_helper_legacy_gamma_set,
463 .set_property = drm_atomic_helper_crtc_set_property, 504 .set_property = drm_atomic_helper_crtc_set_property,
464 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, 505 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
465 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, 506 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
@@ -471,6 +512,7 @@ static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = {
471 .mode_set_nofb = omap_crtc_mode_set_nofb, 512 .mode_set_nofb = omap_crtc_mode_set_nofb,
472 .disable = omap_crtc_disable, 513 .disable = omap_crtc_disable,
473 .enable = omap_crtc_enable, 514 .enable = omap_crtc_enable,
515 .atomic_check = omap_crtc_atomic_check,
474 .atomic_begin = omap_crtc_atomic_begin, 516 .atomic_begin = omap_crtc_atomic_begin,
475 .atomic_flush = omap_crtc_atomic_flush, 517 .atomic_flush = omap_crtc_atomic_flush,
476}; 518};
@@ -534,6 +576,20 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
534 576
535 drm_crtc_helper_add(crtc, &omap_crtc_helper_funcs); 577 drm_crtc_helper_add(crtc, &omap_crtc_helper_funcs);
536 578
 579 /* The dispc API adapts to whatever size, but the HW supports a
 580 * 256 element gamma table for LCDs and a 1024 element table for
 581 * OMAP_DSS_CHANNEL_DIGIT. The X server assumes 256 element gamma
 582 * tables, so let's use that. The size of the HW gamma table can be
 583 * extracted with dispc_mgr_gamma_size(); if it returns 0, the
 584 * gamma table is not supported.
585 */
586 if (dispc_mgr_gamma_size(channel)) {
587 uint gamma_lut_size = 256;
588
589 drm_crtc_enable_color_mgmt(crtc, 0, false, gamma_lut_size);
590 drm_mode_crtc_set_gamma_size(crtc, gamma_lut_size);
591 }
592
537 omap_plane_install_properties(crtc->primary, &crtc->base); 593 omap_plane_install_properties(crtc->primary, &crtc->base);
538 594
539 omap_crtcs[channel] = omap_crtc; 595 omap_crtcs[channel] = omap_crtc;
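With .gamma_set wired to drm_atomic_helper_legacy_gamma_set, a legacy gamma ioctl from userspace is translated into the atomic GAMMA_LUT property and reaches dispc_mgr_set_gamma() via atomic_flush. A hedged libdrm sketch of the userspace side (fd and crtc_id are assumed to come from the usual drmModeGetResources() discovery):

/*
 * Minimal libdrm sketch: program a linear 256-entry ramp through the
 * legacy gamma ioctl, which drm_atomic_helper_legacy_gamma_set turns
 * into the atomic GAMMA_LUT property.
 */
#include <stdint.h>
#include <xf86drmMode.h>

static int set_linear_gamma(int fd, uint32_t crtc_id)
{
	uint16_t r[256], g[256], b[256];
	int i;

	for (i = 0; i < 256; i++)
		r[i] = g[i] = b[i] = (uint16_t)(i * 0x0101); /* 0x0000..0xffff */

	return drmModeCrtcSetGamma(fd, crtc_id, 256, r, g, b);
}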
diff --git a/drivers/gpu/drm/omapdrm/omap_debugfs.c b/drivers/gpu/drm/omapdrm/omap_debugfs.c
index 6f5fc14fc015..479bf24050f8 100644
--- a/drivers/gpu/drm/omapdrm/omap_debugfs.c
+++ b/drivers/gpu/drm/omapdrm/omap_debugfs.c
@@ -17,6 +17,8 @@
17 * this program. If not, see <http://www.gnu.org/licenses/>. 17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */ 18 */
19 19
20#include <linux/seq_file.h>
21
20#include <drm/drm_crtc.h> 22#include <drm/drm_crtc.h>
21#include <drm/drm_fb_helper.h> 23#include <drm/drm_fb_helper.h>
22 24
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index de275a5be1db..4ceed7a9762f 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -27,6 +27,7 @@
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/platform_device.h> /* platform_device() */ 28#include <linux/platform_device.h> /* platform_device() */
29#include <linux/sched.h> 29#include <linux/sched.h>
30#include <linux/seq_file.h>
30#include <linux/slab.h> 31#include <linux/slab.h>
31#include <linux/time.h> 32#include <linux/time.h>
32#include <linux/vmalloc.h> 33#include <linux/vmalloc.h>
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index d86f5479345b..26c6134eb744 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -142,8 +142,9 @@ static int omap_atomic_commit(struct drm_device *dev,
142{ 142{
143 struct omap_drm_private *priv = dev->dev_private; 143 struct omap_drm_private *priv = dev->dev_private;
144 struct omap_atomic_state_commit *commit; 144 struct omap_atomic_state_commit *commit;
145 unsigned int i; 145 struct drm_crtc *crtc;
146 int ret; 146 struct drm_crtc_state *crtc_state;
147 int i, ret;
147 148
148 ret = drm_atomic_helper_prepare_planes(dev, state); 149 ret = drm_atomic_helper_prepare_planes(dev, state);
149 if (ret) 150 if (ret)
@@ -163,10 +164,8 @@ static int omap_atomic_commit(struct drm_device *dev,
163 /* Wait until all affected CRTCs have completed previous commits and 164 /* Wait until all affected CRTCs have completed previous commits and
164 * mark them as pending. 165 * mark them as pending.
165 */ 166 */
166 for (i = 0; i < dev->mode_config.num_crtc; ++i) { 167 for_each_crtc_in_state(state, crtc, crtc_state, i)
167 if (state->crtcs[i]) 168 commit->crtcs |= drm_crtc_mask(crtc);
168 commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]);
169 }
170 169
171 wait_event(priv->commit.wait, !omap_atomic_is_pending(priv, commit)); 170 wait_event(priv->commit.wait, !omap_atomic_is_pending(priv, commit));
172 171
@@ -175,7 +174,7 @@ static int omap_atomic_commit(struct drm_device *dev,
175 spin_unlock(&priv->commit.lock); 174 spin_unlock(&priv->commit.lock);
176 175
177 /* Swap the state, this is the point of no return. */ 176 /* Swap the state, this is the point of no return. */
178 drm_atomic_helper_swap_state(dev, state); 177 drm_atomic_helper_swap_state(state, true);
179 178
180 if (nonblock) 179 if (nonblock)
181 schedule_work(&commit->work); 180 schedule_work(&commit->work);
@@ -203,6 +202,8 @@ static int get_connector_type(struct omap_dss_device *dssdev)
203 return DRM_MODE_CONNECTOR_HDMIA; 202 return DRM_MODE_CONNECTOR_HDMIA;
204 case OMAP_DISPLAY_TYPE_DVI: 203 case OMAP_DISPLAY_TYPE_DVI:
205 return DRM_MODE_CONNECTOR_DVID; 204 return DRM_MODE_CONNECTOR_DVID;
205 case OMAP_DISPLAY_TYPE_DSI:
206 return DRM_MODE_CONNECTOR_DSI;
206 default: 207 default:
207 return DRM_MODE_CONNECTOR_Unknown; 208 return DRM_MODE_CONNECTOR_Unknown;
208 } 209 }
@@ -800,7 +801,6 @@ static struct drm_driver omap_drm_driver = {
800 .unload = dev_unload, 801 .unload = dev_unload,
801 .open = dev_open, 802 .open = dev_open,
802 .lastclose = dev_lastclose, 803 .lastclose = dev_lastclose,
803 .set_busid = drm_platform_set_busid,
804 .get_vblank_counter = drm_vblank_no_hw_counter, 804 .get_vblank_counter = drm_vblank_no_hw_counter,
805 .enable_vblank = omap_irq_enable_vblank, 805 .enable_vblank = omap_irq_enable_vblank,
806 .disable_vblank = omap_irq_disable_vblank, 806 .disable_vblank = omap_irq_disable_vblank,
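The commit path now derives the pending-CRTC mask from the atomic state itself rather than scanning every CRTC index on the device; the rcar-du commit path below makes the identical conversion. A condensed sketch of the new loop, assuming the 4.7-era iterator names:

	/* Only CRTCs carried in this atomic state contribute to the
	 * pending mask; drm_crtc_mask() hides the bit arithmetic.
	 */
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	unsigned int i;
	u32 pending = 0;

	for_each_crtc_in_state(state, crtc, crtc_state, i)
		pending |= drm_crtc_mask(crtc);	/* == 1 << drm_crtc_index(crtc) */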
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 3f823c368912..dcc30a98b9d4 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -24,7 +24,6 @@
24#include <linux/platform_data/omap_drm.h> 24#include <linux/platform_data/omap_drm.h>
25#include <linux/types.h> 25#include <linux/types.h>
26#include <linux/wait.h> 26#include <linux/wait.h>
27#include <video/omapdss.h>
28 27
29#include <drm/drmP.h> 28#include <drm/drmP.h>
30#include <drm/drm_crtc_helper.h> 29#include <drm/drm_crtc_helper.h>
@@ -183,7 +182,6 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
183 struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd); 182 struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);
184struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, 183struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
185 const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos); 184 const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
186struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p);
187int omap_framebuffer_pin(struct drm_framebuffer *fb); 185int omap_framebuffer_pin(struct drm_framebuffer *fb);
188void omap_framebuffer_unpin(struct drm_framebuffer *fb); 186void omap_framebuffer_unpin(struct drm_framebuffer *fb);
189void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, 187void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
@@ -231,7 +229,6 @@ int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
231 int x, int y, dma_addr_t *paddr); 229 int x, int y, dma_addr_t *paddr);
232uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj); 230uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj);
233size_t omap_gem_mmap_size(struct drm_gem_object *obj); 231size_t omap_gem_mmap_size(struct drm_gem_object *obj);
234int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h);
235int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient); 232int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient);
236 233
237struct dma_buf *omap_gem_prime_export(struct drm_device *dev, 234struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
@@ -239,17 +236,6 @@ struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
239struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev, 236struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
240 struct dma_buf *buffer); 237 struct dma_buf *buffer);
241 238
242static inline int align_pitch(int pitch, int width, int bpp)
243{
244 int bytespp = (bpp + 7) / 8;
245 /* in case someone tries to feed us a completely bogus stride: */
246 pitch = max(pitch, width * bytespp);
247 /* PVR needs alignment to 8 pixels.. right now that is the most
248 * restrictive stride requirement..
249 */
250 return roundup(pitch, 8 * bytespp);
251}
252
253/* map crtc to vblank mask */ 239/* map crtc to vblank mask */
254uint32_t pipe2vbl(struct drm_crtc *crtc); 240uint32_t pipe2vbl(struct drm_crtc *crtc);
255struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder); 241struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 94ec06d3d737..983c8cf2441c 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -17,6 +17,8 @@
17 * this program. If not, see <http://www.gnu.org/licenses/>. 17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */ 18 */
19 19
20#include <linux/seq_file.h>
21
20#include <drm/drm_crtc.h> 22#include <drm/drm_crtc.h>
21#include <drm/drm_crtc_helper.h> 23#include <drm/drm_crtc_helper.h>
22 24
@@ -120,17 +122,9 @@ static void omap_framebuffer_destroy(struct drm_framebuffer *fb)
120 kfree(omap_fb); 122 kfree(omap_fb);
121} 123}
122 124
123static int omap_framebuffer_dirty(struct drm_framebuffer *fb,
124 struct drm_file *file_priv, unsigned flags, unsigned color,
125 struct drm_clip_rect *clips, unsigned num_clips)
126{
127 return 0;
128}
129
130static const struct drm_framebuffer_funcs omap_framebuffer_funcs = { 125static const struct drm_framebuffer_funcs omap_framebuffer_funcs = {
131 .create_handle = omap_framebuffer_create_handle, 126 .create_handle = omap_framebuffer_create_handle,
132 .destroy = omap_framebuffer_destroy, 127 .destroy = omap_framebuffer_destroy,
133 .dirty = omap_framebuffer_dirty,
134}; 128};
135 129
136static uint32_t get_linear_addr(struct plane *plane, 130static uint32_t get_linear_addr(struct plane *plane,
@@ -318,14 +312,6 @@ void omap_framebuffer_unpin(struct drm_framebuffer *fb)
318 mutex_unlock(&omap_fb->lock); 312 mutex_unlock(&omap_fb->lock);
319} 313}
320 314
321struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p)
322{
323 struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
324 if (p >= drm_format_num_planes(fb->pixel_format))
325 return NULL;
326 return omap_fb->planes[p].bo;
327}
328
 329 /* iterate through all the connectors, returning ones that are attached 315
 330 * to the same fb. 316
331 */ 317 */
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 89da41ac64d2..adb10fbe918d 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -125,9 +125,8 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
125 mode_cmd.width = sizes->surface_width; 125 mode_cmd.width = sizes->surface_width;
126 mode_cmd.height = sizes->surface_height; 126 mode_cmd.height = sizes->surface_height;
127 127
128 mode_cmd.pitches[0] = align_pitch( 128 mode_cmd.pitches[0] =
129 mode_cmd.width * ((sizes->surface_bpp + 7) / 8), 129 DIV_ROUND_UP(mode_cmd.width * sizes->surface_bpp, 8);
130 mode_cmd.width, sizes->surface_bpp);
131 130
132 fbdev->ywrap_enabled = priv->has_dmm && ywrap_enabled; 131 fbdev->ywrap_enabled = priv->has_dmm && ywrap_enabled;
133 if (fbdev->ywrap_enabled) { 132 if (fbdev->ywrap_enabled) {
@@ -280,9 +279,6 @@ struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
280 if (ret) 279 if (ret)
281 goto fini; 280 goto fini;
282 281
283 /* disable all the possible outputs/crtcs before entering KMS mode */
284 drm_helper_disable_unused_functions(dev);
285
286 ret = drm_fb_helper_initial_config(helper, 32); 282 ret = drm_fb_helper_initial_config(helper, 32);
287 if (ret) 283 if (ret)
288 goto fini; 284 goto fini;
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index b97afc281778..9b3f565fd8d7 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -17,6 +17,7 @@
17 * this program. If not, see <http://www.gnu.org/licenses/>. 17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */ 18 */
19 19
20#include <linux/seq_file.h>
20#include <linux/shmem_fs.h> 21#include <linux/shmem_fs.h>
21#include <linux/spinlock.h> 22#include <linux/spinlock.h>
22#include <linux/pfn_t.h> 23#include <linux/pfn_t.h>
@@ -382,18 +383,6 @@ size_t omap_gem_mmap_size(struct drm_gem_object *obj)
382 return size; 383 return size;
383} 384}
384 385
385/* get tiled size, returns -EINVAL if not tiled buffer */
386int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
387{
388 struct omap_gem_object *omap_obj = to_omap_bo(obj);
389 if (omap_obj->flags & OMAP_BO_TILED) {
390 *w = omap_obj->width;
391 *h = omap_obj->height;
392 return 0;
393 }
394 return -EINVAL;
395}
396
397/* ----------------------------------------------------------------------------- 386/* -----------------------------------------------------------------------------
398 * Fault Handling 387 * Fault Handling
399 */ 388 */
@@ -660,7 +649,8 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
660{ 649{
661 union omap_gem_size gsize; 650 union omap_gem_size gsize;
662 651
663 args->pitch = align_pitch(0, args->width, args->bpp); 652 args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
653
664 args->size = PAGE_ALIGN(args->pitch * args->height); 654 args->size = PAGE_ALIGN(args->pitch * args->height);
665 655
666 gsize = (union omap_gem_size){ 656 gsize = (union omap_gem_size){
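Both the fbdev and dumb-buffer paths switch from the removed align_pitch() helper to a byte-exact DIV_ROUND_UP pitch, dropping the old 8-pixel PVR alignment. A small standalone sketch contrasting the two (the 1023x16bpp example is an illustrative assumption):

/* Contrast of the removed align_pitch() with the new byte-exact pitch. */
#include <stdio.h>

static int align_pitch_old(int pitch, int width, int bpp)
{
	int bytespp = (bpp + 7) / 8;

	if (pitch < width * bytespp)	/* max(pitch, width * bytespp) */
		pitch = width * bytespp;
	/* old PVR constraint: round the stride up to 8 pixels */
	return ((pitch + 8 * bytespp - 1) / (8 * bytespp)) * (8 * bytespp);
}

static int pitch_new(int width, int bpp)
{
	return (width * bpp + 7) / 8;	/* DIV_ROUND_UP(width * bpp, 8) */
}

int main(void)
{
	/* prints old=2048 new=2046 */
	printf("old=%d new=%d\n",
	       align_pitch_old(0, 1023, 16), pitch_new(1023, 16));
	return 0;
}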
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 8b5d54385892..ad429683fef7 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -221,7 +221,6 @@ static int qxl_crtc_page_flip(struct drm_crtc *crtc,
221{ 221{
222 struct drm_device *dev = crtc->dev; 222 struct drm_device *dev = crtc->dev;
223 struct qxl_device *qdev = dev->dev_private; 223 struct qxl_device *qdev = dev->dev_private;
224 struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
225 struct qxl_framebuffer *qfb_src = to_qxl_framebuffer(fb); 224 struct qxl_framebuffer *qfb_src = to_qxl_framebuffer(fb);
226 struct qxl_framebuffer *qfb_old = to_qxl_framebuffer(crtc->primary->fb); 225 struct qxl_framebuffer *qfb_old = to_qxl_framebuffer(crtc->primary->fb);
227 struct qxl_bo *bo_old = gem_to_qxl_bo(qfb_old->obj); 226 struct qxl_bo *bo_old = gem_to_qxl_bo(qfb_old->obj);
@@ -252,14 +251,14 @@ static int qxl_crtc_page_flip(struct drm_crtc *crtc,
252 qxl_draw_dirty_fb(qdev, qfb_src, bo, 0, 0, 251 qxl_draw_dirty_fb(qdev, qfb_src, bo, 0, 0,
253 &norect, one_clip_rect, inc); 252 &norect, one_clip_rect, inc);
254 253
255 drm_vblank_get(dev, qcrtc->index); 254 drm_crtc_vblank_get(crtc);
256 255
257 if (event) { 256 if (event) {
258 spin_lock_irqsave(&dev->event_lock, flags); 257 spin_lock_irqsave(&dev->event_lock, flags);
259 drm_send_vblank_event(dev, qcrtc->index, event); 258 drm_crtc_send_vblank_event(crtc, event);
260 spin_unlock_irqrestore(&dev->event_lock, flags); 259 spin_unlock_irqrestore(&dev->event_lock, flags);
261 } 260 }
262 drm_vblank_put(dev, qcrtc->index); 261 drm_crtc_vblank_put(crtc);
263 262
264 ret = qxl_bo_reserve(bo, false); 263 ret = qxl_bo_reserve(bo, false);
265 if (!ret) { 264 if (!ret) {
@@ -730,7 +729,6 @@ static int qdev_crtc_init(struct drm_device *dev, int crtc_id)
730 729
731 drm_crtc_init(dev, &qxl_crtc->base, &qxl_crtc_funcs); 730 drm_crtc_init(dev, &qxl_crtc->base, &qxl_crtc_funcs);
732 qxl_crtc->index = crtc_id; 731 qxl_crtc->index = crtc_id;
733 drm_mode_crtc_set_gamma_size(&qxl_crtc->base, 256);
734 drm_crtc_helper_add(&qxl_crtc->base, &qxl_crtc_helper_funcs); 732 drm_crtc_helper_add(&qxl_crtc->base, &qxl_crtc_helper_funcs);
735 return 0; 733 return 0;
736} 734}
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index dc9df5fe50ba..460bbceae297 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -256,7 +256,7 @@ static struct drm_driver qxl_driver = {
256 .gem_prime_vmap = qxl_gem_prime_vmap, 256 .gem_prime_vmap = qxl_gem_prime_vmap,
257 .gem_prime_vunmap = qxl_gem_prime_vunmap, 257 .gem_prime_vunmap = qxl_gem_prime_vunmap,
258 .gem_prime_mmap = qxl_gem_prime_mmap, 258 .gem_prime_mmap = qxl_gem_prime_mmap,
259 .gem_free_object = qxl_gem_object_free, 259 .gem_free_object_unlocked = qxl_gem_object_free,
260 .gem_open_object = qxl_gem_object_open, 260 .gem_open_object = qxl_gem_object_open,
261 .gem_close_object = qxl_gem_object_close, 261 .gem_close_object = qxl_gem_object_close,
262 .fops = &qxl_fops, 262 .fops = &qxl_fops,
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 5ea57f6320b8..df2657051afd 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -131,10 +131,6 @@ static int qxlfb_create_pinned_object(struct qxl_fbdev *qfbdev,
131 int ret; 131 int ret;
132 int aligned_size, size; 132 int aligned_size, size;
133 int height = mode_cmd->height; 133 int height = mode_cmd->height;
134 int bpp;
135 int depth;
136
137 drm_fb_get_bpp_depth(mode_cmd->pixel_format, &bpp, &depth);
138 134
139 size = mode_cmd->pitches[0] * height; 135 size = mode_cmd->pitches[0] * height;
140 aligned_size = ALIGN(size, PAGE_SIZE); 136 aligned_size = ALIGN(size, PAGE_SIZE);
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 4efa8e261baf..f599cd073b72 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -96,7 +96,7 @@ retry:
96 return 0; 96 return 0;
97 97
98 if (have_drawable_releases && sc > 300) { 98 if (have_drawable_releases && sc > 300) {
99 FENCE_WARN(fence, "failed to wait on release %d " 99 FENCE_WARN(fence, "failed to wait on release %llu "
100 "after spincount %d\n", 100 "after spincount %d\n",
101 fence->context & ~0xf0000000, sc); 101 fence->context & ~0xf0000000, sc);
102 goto signaled; 102 goto signaled;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 2e216e2ea78c..a97abc8af657 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -276,14 +276,14 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
276 atombios_enable_crtc_memreq(crtc, ATOM_ENABLE); 276 atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
277 atombios_blank_crtc(crtc, ATOM_DISABLE); 277 atombios_blank_crtc(crtc, ATOM_DISABLE);
278 if (dev->num_crtcs > radeon_crtc->crtc_id) 278 if (dev->num_crtcs > radeon_crtc->crtc_id)
279 drm_vblank_on(dev, radeon_crtc->crtc_id); 279 drm_crtc_vblank_on(crtc);
280 radeon_crtc_load_lut(crtc); 280 radeon_crtc_load_lut(crtc);
281 break; 281 break;
282 case DRM_MODE_DPMS_STANDBY: 282 case DRM_MODE_DPMS_STANDBY:
283 case DRM_MODE_DPMS_SUSPEND: 283 case DRM_MODE_DPMS_SUSPEND:
284 case DRM_MODE_DPMS_OFF: 284 case DRM_MODE_DPMS_OFF:
285 if (dev->num_crtcs > radeon_crtc->crtc_id) 285 if (dev->num_crtcs > radeon_crtc->crtc_id)
286 drm_vblank_off(dev, radeon_crtc->crtc_id); 286 drm_crtc_vblank_off(crtc);
287 if (radeon_crtc->enabled) 287 if (radeon_crtc->enabled)
288 atombios_blank_crtc(crtc, ATOM_ENABLE); 288 atombios_blank_crtc(crtc, ATOM_ENABLE);
289 if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev)) 289 if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
@@ -589,7 +589,8 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
589 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev) || ASIC_IS_DCE8(rdev)) 589 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev) || ASIC_IS_DCE8(rdev))
590 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; 590 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
591 /* use frac fb div on RS780/RS880 */ 591 /* use frac fb div on RS780/RS880 */
592 if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) 592 if (((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
593 && !radeon_crtc->ss_enabled)
593 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; 594 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
594 if (ASIC_IS_DCE32(rdev) && mode->clock > 165000) 595 if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
595 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; 596 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
@@ -626,7 +627,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
626 if (radeon_crtc->ss.refdiv) { 627 if (radeon_crtc->ss.refdiv) {
627 radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV; 628 radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
628 radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv; 629 radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv;
629 if (ASIC_IS_AVIVO(rdev)) 630 if (rdev->family >= CHIP_RV770)
630 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; 631 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
631 } 632 }
632 } 633 }
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 80b24a495d6c..5633ee3eb46e 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -2386,7 +2386,7 @@ struct radeon_device {
2386 struct radeon_mman mman; 2386 struct radeon_mman mman;
2387 struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS]; 2387 struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
2388 wait_queue_head_t fence_queue; 2388 wait_queue_head_t fence_queue;
2389 unsigned fence_context; 2389 u64 fence_context;
2390 struct mutex ring_lock; 2390 struct mutex ring_lock;
2391 struct radeon_ring ring[RADEON_NUM_RINGS]; 2391 struct radeon_ring ring[RADEON_NUM_RINGS];
2392 bool ib_pool_ready; 2392 bool ib_pool_ready;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index e721e6b2766e..21c44b2293bc 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -630,6 +630,23 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
630/* 630/*
631 * GPU helpers function. 631 * GPU helpers function.
632 */ 632 */
633
634/**
635 * radeon_device_is_virtual - check if we are running is a virtual environment
636 *
637 * Check if the asic has been passed through to a VM (all asics).
638 * Used at driver startup.
639 * Returns true if virtual or false if not.
640 */
641static bool radeon_device_is_virtual(void)
642{
643#ifdef CONFIG_X86
644 return boot_cpu_has(X86_FEATURE_HYPERVISOR);
645#else
646 return false;
647#endif
648}
649
633/** 650/**
634 * radeon_card_posted - check if the hw has already been initialized 651 * radeon_card_posted - check if the hw has already been initialized
635 * 652 *
@@ -643,6 +660,10 @@ bool radeon_card_posted(struct radeon_device *rdev)
643{ 660{
644 uint32_t reg; 661 uint32_t reg;
645 662
663 /* for pass through, always force asic_init */
664 if (radeon_device_is_virtual())
665 return false;
666
646 /* required for EFI mode on macbook2,1 which uses an r5xx asic */ 667 /* required for EFI mode on macbook2,1 which uses an r5xx asic */
647 if (efi_enabled(EFI_BOOT) && 668 if (efi_enabled(EFI_BOOT) &&
648 (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) && 669 (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
@@ -1631,7 +1652,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
1631 radeon_agp_suspend(rdev); 1652 radeon_agp_suspend(rdev);
1632 1653
1633 pci_save_state(dev->pdev); 1654 pci_save_state(dev->pdev);
1634 if (freeze && rdev->family >= CHIP_R600) { 1655 if (freeze && rdev->family >= CHIP_CEDAR) {
1635 rdev->asic->asic_reset(rdev, true); 1656 rdev->asic->asic_reset(rdev, true);
1636 pci_restore_state(dev->pdev); 1657 pci_restore_state(dev->pdev);
1637 } else if (suspend) { 1658 } else if (suspend) {
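radeon_device_is_virtual() keys off X86_FEATURE_HYPERVISOR, which maps to CPUID leaf 1, ECX bit 31 — the bit hypervisors set for their guests — so passed-through GPUs always take the asic_init path. A userspace sketch of the same check:

/* Userspace equivalent of the X86_FEATURE_HYPERVISOR test. */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	printf("hypervisor present: %s\n",
	       (ecx & (1u << 31)) ? "yes" : "no");
	return 0;
}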
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 6a41b4982647..3965d1916b9c 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -231,19 +231,21 @@ void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
231 *blue = radeon_crtc->lut_b[regno] << 6; 231 *blue = radeon_crtc->lut_b[regno] << 6;
232} 232}
233 233
234static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 234static int radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
235 u16 *blue, uint32_t start, uint32_t size) 235 u16 *blue, uint32_t size)
236{ 236{
237 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 237 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
238 int end = (start + size > 256) ? 256 : start + size, i; 238 int i;
239 239
240 /* userspace palettes are always correct as is */ 240 /* userspace palettes are always correct as is */
241 for (i = start; i < end; i++) { 241 for (i = 0; i < size; i++) {
242 radeon_crtc->lut_r[i] = red[i] >> 6; 242 radeon_crtc->lut_r[i] = red[i] >> 6;
243 radeon_crtc->lut_g[i] = green[i] >> 6; 243 radeon_crtc->lut_g[i] = green[i] >> 6;
244 radeon_crtc->lut_b[i] = blue[i] >> 6; 244 radeon_crtc->lut_b[i] = blue[i] >> 6;
245 } 245 }
246 radeon_crtc_load_lut(crtc); 246 radeon_crtc_load_lut(crtc);
247
248 return 0;
247} 249}
248 250
249static void radeon_crtc_destroy(struct drm_crtc *crtc) 251static void radeon_crtc_destroy(struct drm_crtc *crtc)
@@ -381,7 +383,7 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
381 383
382 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); 384 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
383 385
384 drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id); 386 drm_crtc_vblank_put(&radeon_crtc->base);
385 radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id); 387 radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id);
386 queue_work(radeon_crtc->flip_queue, &work->unpin_work); 388 queue_work(radeon_crtc->flip_queue, &work->unpin_work);
387} 389}
@@ -598,7 +600,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
598 } 600 }
599 work->base = base; 601 work->base = base;
600 602
601 r = drm_vblank_get(crtc->dev, radeon_crtc->crtc_id); 603 r = drm_crtc_vblank_get(crtc);
602 if (r) { 604 if (r) {
603 DRM_ERROR("failed to get vblank before flip\n"); 605 DRM_ERROR("failed to get vblank before flip\n");
604 goto pflip_cleanup; 606 goto pflip_cleanup;
@@ -625,7 +627,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
625 return 0; 627 return 0;
626 628
627vblank_cleanup: 629vblank_cleanup:
628 drm_vblank_put(crtc->dev, radeon_crtc->crtc_id); 630 drm_crtc_vblank_put(crtc);
629 631
630pflip_cleanup: 632pflip_cleanup:
631 if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) { 633 if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) {
@@ -688,6 +690,7 @@ radeon_crtc_set_config(struct drm_mode_set *set)
688 pm_runtime_put_autosuspend(dev->dev); 690 pm_runtime_put_autosuspend(dev->dev);
689 return ret; 691 return ret;
690} 692}
693
691static const struct drm_crtc_funcs radeon_crtc_funcs = { 694static const struct drm_crtc_funcs radeon_crtc_funcs = {
692 .cursor_set2 = radeon_crtc_cursor_set2, 695 .cursor_set2 = radeon_crtc_cursor_set2,
693 .cursor_move = radeon_crtc_cursor_move, 696 .cursor_move = radeon_crtc_cursor_move,
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index b55aa740171f..a455dc7d4aa1 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -34,11 +34,9 @@
34#include "radeon_drv.h" 34#include "radeon_drv.h"
35 35
36#include <drm/drm_pciids.h> 36#include <drm/drm_pciids.h>
37#include <linux/apple-gmux.h>
38#include <linux/console.h> 37#include <linux/console.h>
39#include <linux/module.h> 38#include <linux/module.h>
40#include <linux/pm_runtime.h> 39#include <linux/pm_runtime.h>
41#include <linux/vgaarb.h>
42#include <linux/vga_switcheroo.h> 40#include <linux/vga_switcheroo.h>
43#include <drm/drm_gem.h> 41#include <drm/drm_gem.h>
44 42
@@ -340,13 +338,7 @@ static int radeon_pci_probe(struct pci_dev *pdev,
340 if (ret == -EPROBE_DEFER) 338 if (ret == -EPROBE_DEFER)
341 return ret; 339 return ret;
342 340
343 /* 341 if (vga_switcheroo_client_probe_defer(pdev))
344 * apple-gmux is needed on dual GPU MacBook Pro
345 * to probe the panel if we're the inactive GPU.
346 */
347 if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
348 apple_gmux_present() && pdev != vga_default_device() &&
349 !vga_switcheroo_handler_flags())
350 return -EPROBE_DEFER; 342 return -EPROBE_DEFER;
351 343
352 /* Get rid of things like offb */ 344 /* Get rid of things like offb */
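The open-coded apple-gmux test is replaced by vga_switcheroo_client_probe_defer(), which centralizes the "defer until the mux handler is ready" policy. A sketch of the resulting probe pattern for a hypothetical PCI GPU driver (the probe function name is an illustration, not part of this patch):

#include <linux/pci.h>
#include <linux/vga_switcheroo.h>

static int example_gpu_pci_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	/* inactive GPU on a dual-GPU laptop: wait for the mux handler */
	if (vga_switcheroo_client_probe_defer(pdev))
		return -EPROBE_DEFER;

	/* ... regular hardware init would follow here ... */
	return 0;
}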
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 478d4099b0d0..d0de4022fff9 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -332,14 +332,14 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
332 WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl)); 332 WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl));
333 } 333 }
334 if (dev->num_crtcs > radeon_crtc->crtc_id) 334 if (dev->num_crtcs > radeon_crtc->crtc_id)
335 drm_vblank_on(dev, radeon_crtc->crtc_id); 335 drm_crtc_vblank_on(crtc);
336 radeon_crtc_load_lut(crtc); 336 radeon_crtc_load_lut(crtc);
337 break; 337 break;
338 case DRM_MODE_DPMS_STANDBY: 338 case DRM_MODE_DPMS_STANDBY:
339 case DRM_MODE_DPMS_SUSPEND: 339 case DRM_MODE_DPMS_SUSPEND:
340 case DRM_MODE_DPMS_OFF: 340 case DRM_MODE_DPMS_OFF:
341 if (dev->num_crtcs > radeon_crtc->crtc_id) 341 if (dev->num_crtcs > radeon_crtc->crtc_id)
342 drm_vblank_off(dev, radeon_crtc->crtc_id); 342 drm_crtc_vblank_off(crtc);
343 if (radeon_crtc->crtc_id) 343 if (radeon_crtc->crtc_id)
344 WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask)); 344 WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask));
345 else { 345 else {
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 38226d925a5b..4b6542538ff9 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -246,6 +246,7 @@ static void radeon_set_power_state(struct radeon_device *rdev)
246 246
247static void radeon_pm_set_clocks(struct radeon_device *rdev) 247static void radeon_pm_set_clocks(struct radeon_device *rdev)
248{ 248{
249 struct drm_crtc *crtc;
249 int i, r; 250 int i, r;
250 251
251 /* no need to take locks, etc. if nothing's going to change */ 252 /* no need to take locks, etc. if nothing's going to change */
@@ -274,26 +275,30 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
274 radeon_unmap_vram_bos(rdev); 275 radeon_unmap_vram_bos(rdev);
275 276
276 if (rdev->irq.installed) { 277 if (rdev->irq.installed) {
277 for (i = 0; i < rdev->num_crtc; i++) { 278 i = 0;
279 drm_for_each_crtc(crtc, rdev->ddev) {
278 if (rdev->pm.active_crtcs & (1 << i)) { 280 if (rdev->pm.active_crtcs & (1 << i)) {
279 /* This can fail if a modeset is in progress */ 281 /* This can fail if a modeset is in progress */
280 if (drm_vblank_get(rdev->ddev, i) == 0) 282 if (drm_crtc_vblank_get(crtc) == 0)
281 rdev->pm.req_vblank |= (1 << i); 283 rdev->pm.req_vblank |= (1 << i);
282 else 284 else
283 DRM_DEBUG_DRIVER("crtc %d no vblank, can glitch\n", 285 DRM_DEBUG_DRIVER("crtc %d no vblank, can glitch\n",
284 i); 286 i);
285 } 287 }
288 i++;
286 } 289 }
287 } 290 }
288 291
289 radeon_set_power_state(rdev); 292 radeon_set_power_state(rdev);
290 293
291 if (rdev->irq.installed) { 294 if (rdev->irq.installed) {
292 for (i = 0; i < rdev->num_crtc; i++) { 295 i = 0;
296 drm_for_each_crtc(crtc, rdev->ddev) {
293 if (rdev->pm.req_vblank & (1 << i)) { 297 if (rdev->pm.req_vblank & (1 << i)) {
294 rdev->pm.req_vblank &= ~(1 << i); 298 rdev->pm.req_vblank &= ~(1 << i);
295 drm_vblank_put(rdev->ddev, i); 299 drm_crtc_vblank_put(crtc);
296 } 300 }
301 i++;
297 } 302 }
298 } 303 }
299 304
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index fb9242d27883..899ef7a2a7b4 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -217,7 +217,7 @@ static struct drm_driver rcar_du_driver = {
217 .get_vblank_counter = drm_vblank_no_hw_counter, 217 .get_vblank_counter = drm_vblank_no_hw_counter,
218 .enable_vblank = rcar_du_enable_vblank, 218 .enable_vblank = rcar_du_enable_vblank,
219 .disable_vblank = rcar_du_disable_vblank, 219 .disable_vblank = rcar_du_disable_vblank,
220 .gem_free_object = drm_gem_cma_free_object, 220 .gem_free_object_unlocked = drm_gem_cma_free_object,
221 .gem_vm_ops = &drm_gem_cma_vm_ops, 221 .gem_vm_ops = &drm_gem_cma_vm_ops,
222 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 222 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
223 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 223 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
@@ -278,7 +278,6 @@ static int rcar_du_remove(struct platform_device *pdev)
278 struct rcar_du_device *rcdu = platform_get_drvdata(pdev); 278 struct rcar_du_device *rcdu = platform_get_drvdata(pdev);
279 struct drm_device *ddev = rcdu->ddev; 279 struct drm_device *ddev = rcdu->ddev;
280 280
281 drm_connector_unregister_all(ddev);
282 drm_dev_unregister(ddev); 281 drm_dev_unregister(ddev);
283 282
284 if (rcdu->fbdev) 283 if (rcdu->fbdev)
@@ -320,8 +319,6 @@ static int rcar_du_probe(struct platform_device *pdev)
320 if (!ddev) 319 if (!ddev)
321 return -ENOMEM; 320 return -ENOMEM;
322 321
323 drm_dev_set_unique(ddev, dev_name(&pdev->dev));
324
325 rcdu->ddev = ddev; 322 rcdu->ddev = ddev;
326 ddev->dev_private = rcdu; 323 ddev->dev_private = rcdu;
327 324
@@ -339,15 +336,15 @@ static int rcar_du_probe(struct platform_device *pdev)
339 * disabled for all CRTCs. 336 * disabled for all CRTCs.
340 */ 337 */
341 ret = drm_vblank_init(ddev, (1 << rcdu->info->num_crtcs) - 1); 338 ret = drm_vblank_init(ddev, (1 << rcdu->info->num_crtcs) - 1);
342 if (ret < 0) { 339 if (ret < 0)
343 dev_err(&pdev->dev, "failed to initialize vblank\n");
344 goto error; 340 goto error;
345 }
346 341
347 /* DRM/KMS objects */ 342 /* DRM/KMS objects */
348 ret = rcar_du_modeset_init(rcdu); 343 ret = rcar_du_modeset_init(rcdu);
349 if (ret < 0) { 344 if (ret < 0) {
350 dev_err(&pdev->dev, "failed to initialize DRM/KMS (%d)\n", ret); 345 if (ret != -EPROBE_DEFER)
346 dev_err(&pdev->dev,
347 "failed to initialize DRM/KMS (%d)\n", ret);
351 goto error; 348 goto error;
352 } 349 }
353 350
@@ -360,10 +357,6 @@ static int rcar_du_probe(struct platform_device *pdev)
360 if (ret) 357 if (ret)
361 goto error; 358 goto error;
362 359
363 ret = drm_connector_register_all(ddev);
364 if (ret < 0)
365 goto error;
366
367 DRM_INFO("Device %s probed\n", dev_name(&pdev->dev)); 360 DRM_INFO("Device %s probed\n", dev_name(&pdev->dev));
368 361
369 return 0; 362 return 0;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index 4e939e41f030..55149e9ce28e 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -27,18 +27,6 @@
27#include "rcar_du_vgacon.h" 27#include "rcar_du_vgacon.h"
28 28
29/* ----------------------------------------------------------------------------- 29/* -----------------------------------------------------------------------------
30 * Common connector functions
31 */
32
33struct drm_encoder *
34rcar_du_connector_best_encoder(struct drm_connector *connector)
35{
36 struct rcar_du_connector *rcon = to_rcar_connector(connector);
37
38 return rcar_encoder_to_drm_encoder(rcon->encoder);
39}
40
41/* -----------------------------------------------------------------------------
42 * Encoder 30 * Encoder
43 */ 31 */
44 32
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
index 719b6f2a031c..a8669c3e0dd5 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
@@ -49,9 +49,6 @@ struct rcar_du_connector {
49#define to_rcar_connector(c) \ 49#define to_rcar_connector(c) \
50 container_of(c, struct rcar_du_connector, connector) 50 container_of(c, struct rcar_du_connector, connector)
51 51
52struct drm_encoder *
53rcar_du_connector_best_encoder(struct drm_connector *connector);
54
55int rcar_du_encoder_init(struct rcar_du_device *rcdu, 52int rcar_du_encoder_init(struct rcar_du_device *rcdu,
56 enum rcar_du_encoder_type type, 53 enum rcar_du_encoder_type type,
57 enum rcar_du_output output, 54 enum rcar_du_output output,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
index 6c927144b5c9..612b4d5ae098 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
@@ -52,7 +52,6 @@ static int rcar_du_hdmi_connector_mode_valid(struct drm_connector *connector,
52static const struct drm_connector_helper_funcs connector_helper_funcs = { 52static const struct drm_connector_helper_funcs connector_helper_funcs = {
53 .get_modes = rcar_du_hdmi_connector_get_modes, 53 .get_modes = rcar_du_hdmi_connector_get_modes,
54 .mode_valid = rcar_du_hdmi_connector_mode_valid, 54 .mode_valid = rcar_du_hdmi_connector_mode_valid,
55 .best_encoder = rcar_du_connector_best_encoder,
56}; 55};
57 56
58static enum drm_connector_status 57static enum drm_connector_status
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index e70a4f33d970..6bb032d8ac6b 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -288,6 +288,8 @@ static int rcar_du_atomic_commit(struct drm_device *dev,
288{ 288{
289 struct rcar_du_device *rcdu = dev->dev_private; 289 struct rcar_du_device *rcdu = dev->dev_private;
290 struct rcar_du_commit *commit; 290 struct rcar_du_commit *commit;
291 struct drm_crtc *crtc;
292 struct drm_crtc_state *crtc_state;
291 unsigned int i; 293 unsigned int i;
292 int ret; 294 int ret;
293 295
@@ -309,10 +311,8 @@ static int rcar_du_atomic_commit(struct drm_device *dev,
309 /* Wait until all affected CRTCs have completed previous commits and 311 /* Wait until all affected CRTCs have completed previous commits and
310 * mark them as pending. 312 * mark them as pending.
311 */ 313 */
312 for (i = 0; i < dev->mode_config.num_crtc; ++i) { 314 for_each_crtc_in_state(state, crtc, crtc_state, i)
313 if (state->crtcs[i]) 315 commit->crtcs |= drm_crtc_mask(crtc);
314 commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]);
315 }
316 316
317 spin_lock(&rcdu->commit.wait.lock); 317 spin_lock(&rcdu->commit.wait.lock);
318 ret = wait_event_interruptible_locked(rcdu->commit.wait, 318 ret = wait_event_interruptible_locked(rcdu->commit.wait,
@@ -327,7 +327,7 @@ static int rcar_du_atomic_commit(struct drm_device *dev,
327 } 327 }
328 328
329 /* Swap the state, this is the point of no return. */ 329 /* Swap the state, this is the point of no return. */
330 drm_atomic_helper_swap_state(dev, state); 330 drm_atomic_helper_swap_state(state, true);
331 331
332 if (nonblock) 332 if (nonblock)
333 schedule_work(&commit->work); 333 schedule_work(&commit->work);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
index e905f5da7aaa..6afd0af312ba 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
@@ -59,7 +59,6 @@ static int rcar_du_lvds_connector_get_modes(struct drm_connector *connector)
59 59
60static const struct drm_connector_helper_funcs connector_helper_funcs = { 60static const struct drm_connector_helper_funcs connector_helper_funcs = {
61 .get_modes = rcar_du_lvds_connector_get_modes, 61 .get_modes = rcar_du_lvds_connector_get_modes,
62 .best_encoder = rcar_du_connector_best_encoder,
63}; 62};
64 63
65static enum drm_connector_status 64static enum drm_connector_status
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index d445e67f78e1..bfe31ca870cc 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -140,18 +140,17 @@ int rcar_du_atomic_check_planes(struct drm_device *dev,
140 bool needs_realloc = false; 140 bool needs_realloc = false;
141 unsigned int groups = 0; 141 unsigned int groups = 0;
142 unsigned int i; 142 unsigned int i;
143 struct drm_plane *drm_plane;
144 struct drm_plane_state *drm_plane_state;
143 145
144 /* Check if hardware planes need to be reallocated. */ 146 /* Check if hardware planes need to be reallocated. */
145 for (i = 0; i < dev->mode_config.num_total_plane; ++i) { 147 for_each_plane_in_state(state, drm_plane, drm_plane_state, i) {
146 struct rcar_du_plane_state *plane_state; 148 struct rcar_du_plane_state *plane_state;
147 struct rcar_du_plane *plane; 149 struct rcar_du_plane *plane;
148 unsigned int index; 150 unsigned int index;
149 151
150 if (!state->planes[i]) 152 plane = to_rcar_plane(drm_plane);
151 continue; 153 plane_state = to_rcar_plane_state(drm_plane_state);
152
153 plane = to_rcar_plane(state->planes[i]);
154 plane_state = to_rcar_plane_state(state->plane_states[i]);
155 154
156 dev_dbg(rcdu->dev, "%s: checking plane (%u,%tu)\n", __func__, 155 dev_dbg(rcdu->dev, "%s: checking plane (%u,%tu)\n", __func__,
157 plane->group->index, plane - plane->group->planes); 156 plane->group->index, plane - plane->group->planes);
@@ -247,18 +246,15 @@ int rcar_du_atomic_check_planes(struct drm_device *dev,
247 } 246 }
248 247
249 /* Reallocate hardware planes for each plane that needs it. */ 248 /* Reallocate hardware planes for each plane that needs it. */
250 for (i = 0; i < dev->mode_config.num_total_plane; ++i) { 249 for_each_plane_in_state(state, drm_plane, drm_plane_state, i) {
251 struct rcar_du_plane_state *plane_state; 250 struct rcar_du_plane_state *plane_state;
252 struct rcar_du_plane *plane; 251 struct rcar_du_plane *plane;
253 unsigned int crtc_planes; 252 unsigned int crtc_planes;
254 unsigned int free; 253 unsigned int free;
255 int idx; 254 int idx;
256 255
257 if (!state->planes[i]) 256 plane = to_rcar_plane(drm_plane);
258 continue; 257 plane_state = to_rcar_plane_state(drm_plane_state);
259
260 plane = to_rcar_plane(state->planes[i]);
261 plane_state = to_rcar_plane_state(state->plane_states[i]);
262 258
263 dev_dbg(rcdu->dev, "%s: allocating plane (%u,%tu)\n", __func__, 259 dev_dbg(rcdu->dev, "%s: allocating plane (%u,%tu)\n", __func__,
264 plane->group->index, plane - plane->group->planes); 260 plane->group->index, plane - plane->group->planes);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
index d2f66068e52c..fedb0161e234 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
@@ -195,9 +195,10 @@
195#define DEFR6_ODPM12_DISP (2 << 8) 195#define DEFR6_ODPM12_DISP (2 << 8)
196#define DEFR6_ODPM12_CDE (3 << 8) 196#define DEFR6_ODPM12_CDE (3 << 8)
197#define DEFR6_ODPM12_MASK (3 << 8) 197#define DEFR6_ODPM12_MASK (3 << 8)
198#define DEFR6_TCNE2 (1 << 6) 198#define DEFR6_TCNE1 (1 << 6)
199#define DEFR6_TCNE0 (1 << 4)
199#define DEFR6_MLOS1 (1 << 2) 200#define DEFR6_MLOS1 (1 << 2)
200#define DEFR6_DEFAULT (DEFR6_CODE | DEFR6_TCNE2) 201#define DEFR6_DEFAULT (DEFR6_CODE | DEFR6_TCNE1)
201 202
202/* ----------------------------------------------------------------------------- 203/* -----------------------------------------------------------------------------
203 * R8A7790-only Control Registers 204 * R8A7790-only Control Registers
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
index 9d7e5c99caf6..8d6125c1c0f9 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
@@ -28,7 +28,6 @@ static int rcar_du_vga_connector_get_modes(struct drm_connector *connector)
 
 static const struct drm_connector_helper_funcs connector_helper_funcs = {
 	.get_modes = rcar_du_vga_connector_get_modes,
-	.best_encoder = rcar_du_connector_best_encoder,
 };
 
 static enum drm_connector_status
@@ -79,7 +78,5 @@ int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
 	if (ret < 0)
 		return ret;
 
-	rcon->encoder = renc;
-
 	return 0;
 }
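
Dropping .best_encoder here (and in the Rockchip and sti connectors below) relies on the core falling back to the connector's first attached encoder when the hook is NULL, which is always correct for these one-encoder-per-connector drivers. Roughly what that fallback amounts to (a sketch modelled on the atomic helper default, not the verbatim implementation):

    /* Sketch: the default used when a connector has no .best_encoder. */
    static struct drm_encoder *
    example_best_encoder(struct drm_connector *connector)
    {
            /* the first (here: only) encoder ID attached to the connector */
            return drm_encoder_find(connector->dev, connector->encoder_ids[0]);
    }
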
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index d30bdc38a760..e48611e83c03 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -2,6 +2,7 @@ config DRM_ROCKCHIP
 	tristate "DRM Support for Rockchip"
 	depends on DRM && ROCKCHIP_IOMMU
 	depends on RESET_CONTROLLER
+	select DRM_GEM_CMA_HELPER
 	select DRM_KMS_HELPER
 	select DRM_KMS_FB_HELPER
 	select DRM_PANEL
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index 7f6a55cae27a..c120172add5c 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -349,20 +349,11 @@ static int rockchip_dp_remove(struct platform_device *pdev)
 	return 0;
 }
 
+static const struct dev_pm_ops rockchip_dp_pm_ops = {
 #ifdef CONFIG_PM_SLEEP
-static int rockchip_dp_suspend(struct device *dev)
-{
-	return analogix_dp_suspend(dev);
-}
-
-static int rockchip_dp_resume(struct device *dev)
-{
-	return analogix_dp_resume(dev);
-}
+	.suspend = analogix_dp_suspend,
+	.resume_early = analogix_dp_resume,
 #endif
-
-static const struct dev_pm_ops rockchip_dp_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(rockchip_dp_suspend, rockchip_dp_resume)
 };
 
 static const struct of_device_id rockchip_dp_dt_ids[] = {
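
Note the shape of the replacement: the analogix callbacks are wired straight into struct dev_pm_ops under #ifdef CONFIG_PM_SLEEP rather than through SET_SYSTEM_SLEEP_PM_OPS(), because the pairing chosen here is asymmetric, suspend in the normal phase and resume in the early phase. For comparison, the macro form only covers matching phases (a sketch; example_suspend/example_resume are placeholders):

    /* The macro covers the symmetric .suspend/.resume pairing only: */
    static const struct dev_pm_ops example_pm_ops = {
            SET_SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
    };
    /* A .suspend/.resume_early pairing appears to have no dedicated macro,
     * hence the open-coded initializers in the hunk above. */
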
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
index dedc65b40f36..ca22e5ee89ca 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
@@ -964,18 +964,9 @@ static enum drm_mode_status dw_mipi_dsi_mode_valid(
 	return mode_status;
 }
 
-static struct drm_encoder *dw_mipi_dsi_connector_best_encoder(
-			struct drm_connector *connector)
-{
-	struct dw_mipi_dsi *dsi = con_to_dsi(connector);
-
-	return &dsi->encoder;
-}
-
 static struct drm_connector_helper_funcs dw_mipi_dsi_connector_helper_funcs = {
 	.get_modes = dw_mipi_dsi_connector_get_modes,
 	.mode_valid = dw_mipi_dsi_mode_valid,
-	.best_encoder = dw_mipi_dsi_connector_best_encoder,
 };
 
 static enum drm_connector_status
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index f8b4feb60b25..006260de9dbd 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -579,14 +579,6 @@ inno_hdmi_connector_mode_valid(struct drm_connector *connector,
 		return MODE_OK;
 }
 
-static struct drm_encoder *
-inno_hdmi_connector_best_encoder(struct drm_connector *connector)
-{
-	struct inno_hdmi *hdmi = to_inno_hdmi(connector);
-
-	return &hdmi->encoder;
-}
-
 static int
 inno_hdmi_probe_single_connector_modes(struct drm_connector *connector,
 				       uint32_t maxX, uint32_t maxY)
@@ -613,7 +605,6 @@ static struct drm_connector_funcs inno_hdmi_connector_funcs = {
 static struct drm_connector_helper_funcs inno_hdmi_connector_helper_funcs = {
 	.get_modes = inno_hdmi_connector_get_modes,
 	.mode_valid = inno_hdmi_connector_mode_valid,
-	.best_encoder = inno_hdmi_connector_best_encoder,
 };
 
 static int inno_hdmi_register(struct drm_device *drm, struct inno_hdmi *hdmi)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index a409d1f703cb..d665fb04d264 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -19,11 +19,13 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
 #include <linux/dma-mapping.h>
 #include <linux/pm_runtime.h>
 #include <linux/module.h>
 #include <linux/of_graph.h>
 #include <linux/component.h>
+#include <linux/console.h>
 
 #include "rockchip_drm_drv.h"
 #include "rockchip_drm_fb.h"
@@ -37,6 +39,7 @@
 #define DRIVER_MINOR	0
 
 static bool is_support_iommu = true;
+static struct drm_driver rockchip_drm_driver;
 
 /*
  * Attach a (component) device to the shared drm dma mapping from master drm
@@ -132,20 +135,24 @@ static void rockchip_drm_crtc_disable_vblank(struct drm_device *dev,
 	priv->crtc_funcs[pipe]->disable_vblank(crtc);
 }
 
-static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags)
+static int rockchip_drm_bind(struct device *dev)
 {
+	struct drm_device *drm_dev;
 	struct rockchip_drm_private *private;
 	struct dma_iommu_mapping *mapping = NULL;
-	struct device *dev = drm_dev->dev;
-	struct drm_connector *connector;
 	int ret;
 
-	private = devm_kzalloc(drm_dev->dev, sizeof(*private), GFP_KERNEL);
-	if (!private)
+	drm_dev = drm_dev_alloc(&rockchip_drm_driver, dev);
+	if (!drm_dev)
 		return -ENOMEM;
 
-	mutex_init(&private->commit.lock);
-	INIT_WORK(&private->commit.work, rockchip_drm_atomic_work);
+	dev_set_drvdata(dev, drm_dev);
+
+	private = devm_kzalloc(drm_dev->dev, sizeof(*private), GFP_KERNEL);
+	if (!private) {
+		ret = -ENOMEM;
+		goto err_free;
+	}
 
 	drm_dev->dev_private = private;
 
@@ -186,23 +193,6 @@ static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags)
 	if (ret)
 		goto err_detach_device;
 
-	/*
-	 * All components are now added, we can publish the connector sysfs
-	 * entries to userspace. This will generate hotplug events and so
-	 * userspace will expect to be able to access DRM at this point.
-	 */
-	list_for_each_entry(connector, &drm_dev->mode_config.connector_list,
-			head) {
-		ret = drm_connector_register(connector);
-		if (ret) {
-			dev_err(drm_dev->dev,
-				"[CONNECTOR:%d:%s] drm_connector_register failed: %d\n",
-				connector->base.id,
-				connector->name, ret);
-			goto err_unbind;
-		}
-	}
-
 	/* init kms poll for handling hpd */
 	drm_kms_helper_poll_init(drm_dev);
 
@@ -222,14 +212,19 @@ static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags)
 	if (ret)
 		goto err_vblank_cleanup;
 
+	ret = drm_dev_register(drm_dev, 0);
+	if (ret)
+		goto err_fbdev_fini;
+
 	if (is_support_iommu)
 		arm_iommu_release_mapping(mapping);
 	return 0;
+err_fbdev_fini:
+	rockchip_drm_fbdev_fini(drm_dev);
 err_vblank_cleanup:
 	drm_vblank_cleanup(drm_dev);
 err_kms_helper_poll_fini:
 	drm_kms_helper_poll_fini(drm_dev);
-err_unbind:
 	component_unbind_all(dev, drm_dev);
 err_detach_device:
 	if (is_support_iommu)
@@ -240,12 +235,14 @@ err_release_mapping:
 err_config_cleanup:
 	drm_mode_config_cleanup(drm_dev);
 	drm_dev->dev_private = NULL;
+err_free:
+	drm_dev_unref(drm_dev);
 	return ret;
 }
 
-static int rockchip_drm_unload(struct drm_device *drm_dev)
+static void rockchip_drm_unbind(struct device *dev)
 {
-	struct device *dev = drm_dev->dev;
+	struct drm_device *drm_dev = dev_get_drvdata(dev);
 
 	rockchip_drm_fbdev_fini(drm_dev);
 	drm_vblank_cleanup(drm_dev);
@@ -255,29 +252,9 @@ static int rockchip_drm_unload(struct drm_device *drm_dev)
 	arm_iommu_detach_device(dev);
 	drm_mode_config_cleanup(drm_dev);
 	drm_dev->dev_private = NULL;
-
-	return 0;
-}
-
-static void rockchip_drm_crtc_cancel_pending_vblank(struct drm_crtc *crtc,
-						    struct drm_file *file_priv)
-{
-	struct rockchip_drm_private *priv = crtc->dev->dev_private;
-	int pipe = drm_crtc_index(crtc);
-
-	if (pipe < ROCKCHIP_MAX_CRTC &&
-	    priv->crtc_funcs[pipe] &&
-	    priv->crtc_funcs[pipe]->cancel_pending_vblank)
-		priv->crtc_funcs[pipe]->cancel_pending_vblank(crtc, file_priv);
-}
-
-static void rockchip_drm_preclose(struct drm_device *dev,
-				  struct drm_file *file_priv)
-{
-	struct drm_crtc *crtc;
-
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
-		rockchip_drm_crtc_cancel_pending_vblank(crtc, file_priv);
+	drm_dev_unregister(drm_dev);
+	drm_dev_unref(drm_dev);
+	dev_set_drvdata(dev, NULL);
 }
 
 void rockchip_drm_lastclose(struct drm_device *dev)
@@ -300,23 +277,15 @@ static const struct file_operations rockchip_drm_driver_fops = {
 	.release = drm_release,
 };
 
-const struct vm_operations_struct rockchip_drm_vm_ops = {
-	.open = drm_gem_vm_open,
-	.close = drm_gem_vm_close,
-};
-
 static struct drm_driver rockchip_drm_driver = {
 	.driver_features = DRIVER_MODESET | DRIVER_GEM |
 			   DRIVER_PRIME | DRIVER_ATOMIC,
-	.load = rockchip_drm_load,
-	.unload = rockchip_drm_unload,
-	.preclose = rockchip_drm_preclose,
 	.lastclose = rockchip_drm_lastclose,
 	.get_vblank_counter = drm_vblank_no_hw_counter,
 	.enable_vblank = rockchip_drm_crtc_enable_vblank,
 	.disable_vblank = rockchip_drm_crtc_disable_vblank,
-	.gem_vm_ops = &rockchip_drm_vm_ops,
-	.gem_free_object = rockchip_gem_free_object,
+	.gem_vm_ops = &drm_gem_cma_vm_ops,
+	.gem_free_object_unlocked = rockchip_gem_free_object,
 	.dumb_create = rockchip_gem_dumb_create,
 	.dumb_map_offset = rockchip_gem_dumb_map_offset,
 	.dumb_destroy = drm_gem_dumb_destroy,
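
The switch from .gem_free_object to .gem_free_object_unlocked tells the DRM core that the free callback never depends on dev->struct_mutex, so the core can skip taking it; CMA-backed GEM objects need no such serialization. The driver-side change is just the field name (sketch):

    /* A driver opts in by using the _unlocked hook; the callback itself is
     * unchanged, it only promises not to rely on dev->struct_mutex. */
    static struct drm_driver example_driver = {
            .gem_free_object_unlocked = drm_gem_cma_free_object,
            /* ... */
    };
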
@@ -337,25 +306,38 @@ static struct drm_driver rockchip_drm_driver = {
 };
 
 #ifdef CONFIG_PM_SLEEP
-static int rockchip_drm_sys_suspend(struct device *dev)
+void rockchip_drm_fb_suspend(struct drm_device *drm)
 {
-	struct drm_device *drm = dev_get_drvdata(dev);
-	struct drm_connector *connector;
+	struct rockchip_drm_private *priv = drm->dev_private;
 
-	if (!drm)
-		return 0;
+	console_lock();
+	drm_fb_helper_set_suspend(&priv->fbdev_helper, 1);
+	console_unlock();
+}
 
-	drm_modeset_lock_all(drm);
-	list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
-		int old_dpms = connector->dpms;
+void rockchip_drm_fb_resume(struct drm_device *drm)
+{
+	struct rockchip_drm_private *priv = drm->dev_private;
 
-		if (connector->funcs->dpms)
-			connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF);
+	console_lock();
+	drm_fb_helper_set_suspend(&priv->fbdev_helper, 0);
+	console_unlock();
+}
 
-		/* Set the old mode back to the connector for resume */
-		connector->dpms = old_dpms;
+static int rockchip_drm_sys_suspend(struct device *dev)
+{
+	struct drm_device *drm = dev_get_drvdata(dev);
+	struct rockchip_drm_private *priv = drm->dev_private;
+
+	drm_kms_helper_poll_disable(drm);
+	rockchip_drm_fb_suspend(drm);
+
+	priv->state = drm_atomic_helper_suspend(drm);
+	if (IS_ERR(priv->state)) {
+		rockchip_drm_fb_resume(drm);
+		drm_kms_helper_poll_enable(drm);
+		return PTR_ERR(priv->state);
 	}
-	drm_modeset_unlock_all(drm);
 
 	return 0;
 }
@@ -363,47 +345,11 @@ static int rockchip_drm_sys_suspend(struct device *dev)
 static int rockchip_drm_sys_resume(struct device *dev)
 {
 	struct drm_device *drm = dev_get_drvdata(dev);
-	struct drm_connector *connector;
-	enum drm_connector_status status;
-	bool changed = false;
-
-	if (!drm)
-		return 0;
+	struct rockchip_drm_private *priv = drm->dev_private;
 
-	drm_modeset_lock_all(drm);
-	list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
-		int desired_mode = connector->dpms;
-
-		/*
-		 * at suspend time, we save dpms to connector->dpms,
-		 * restore the old_dpms, and at current time, the connector
-		 * dpms status must be DRM_MODE_DPMS_OFF.
-		 */
-		connector->dpms = DRM_MODE_DPMS_OFF;
-
-		/*
-		 * If the connector has been disconnected during suspend,
-		 * disconnect it from the encoder and leave it off. We'll notify
-		 * userspace at the end.
-		 */
-		if (desired_mode == DRM_MODE_DPMS_ON) {
-			status = connector->funcs->detect(connector, true);
-			if (status == connector_status_disconnected) {
-				connector->encoder = NULL;
-				connector->status = status;
-				changed = true;
-				continue;
-			}
-		}
-		if (connector->funcs->dpms)
-			connector->funcs->dpms(connector, desired_mode);
-	}
-	drm_modeset_unlock_all(drm);
-
-	drm_helper_resume_force_mode(drm);
-
-	if (changed)
-		drm_kms_helper_hotplug_event(drm);
+	drm_atomic_helper_resume(drm, priv->state);
+	rockchip_drm_fb_resume(drm);
+	drm_kms_helper_poll_enable(drm);
 
 	return 0;
 }
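
The system-sleep path above replaces per-connector DPMS bookkeeping with the atomic helpers: drm_atomic_helper_suspend() duplicates the current display state and shuts everything down, and drm_atomic_helper_resume() replays the saved state wholesale. The generic skeleton looks like this (a sketch; it assumes the driver stashes the state pointer somewhere, here a hypothetical priv->state):

    static int example_sys_suspend(struct device *dev)
    {
            struct drm_device *drm = dev_get_drvdata(dev);
            struct example_private *priv = drm->dev_private;

            drm_kms_helper_poll_disable(drm);
            priv->state = drm_atomic_helper_suspend(drm); /* disables all CRTCs */
            if (IS_ERR(priv->state)) {
                    drm_kms_helper_poll_enable(drm);      /* undo on failure */
                    return PTR_ERR(priv->state);
            }
            return 0;
    }

    static int example_sys_resume(struct device *dev)
    {
            struct drm_device *drm = dev_get_drvdata(dev);
            struct example_private *priv = drm->dev_private;

            drm_atomic_helper_resume(drm, priv->state);   /* replays saved state */
            drm_kms_helper_poll_enable(drm);
            return 0;
    }
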
@@ -444,37 +390,6 @@ static void rockchip_add_endpoints(struct device *dev,
 	}
 }
 
-static int rockchip_drm_bind(struct device *dev)
-{
-	struct drm_device *drm;
-	int ret;
-
-	drm = drm_dev_alloc(&rockchip_drm_driver, dev);
-	if (!drm)
-		return -ENOMEM;
-
-	ret = drm_dev_register(drm, 0);
-	if (ret)
-		goto err_free;
-
-	dev_set_drvdata(dev, drm);
-
-	return 0;
-
-err_free:
-	drm_dev_unref(drm);
-	return ret;
-}
-
-static void rockchip_drm_unbind(struct device *dev)
-{
-	struct drm_device *drm = dev_get_drvdata(dev);
-
-	drm_dev_unregister(drm);
-	drm_dev_unref(drm);
-	dev_set_drvdata(dev, NULL);
-}
-
 static const struct component_master_ops rockchip_drm_ops = {
 	.bind = rockchip_drm_bind,
 	.unbind = rockchip_drm_unbind,
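
With the .load/.unload hooks gone, device lifetime is managed entirely in the component master's bind/unbind: allocate with drm_dev_alloc(), initialize, and only then drm_dev_register(), so userspace never observes a half-initialized device. The skeleton the conversion follows (a sketch, with the init details elided):

    static int example_bind(struct device *dev)
    {
            struct drm_device *drm;
            int ret;

            drm = drm_dev_alloc(&example_driver, dev);
            if (!drm)
                    return -ENOMEM;

            /* ...mode config init, component_bind_all(), fbdev setup... */

            ret = drm_dev_register(drm, 0); /* publish to userspace last */
            if (ret)
                    drm_dev_unref(drm);     /* unwind everything done above */
            return ret;
    }
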
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index 56f43a364c7f..ea3932940061 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -40,14 +40,6 @@ struct rockchip_crtc_funcs {
 	int (*enable_vblank)(struct drm_crtc *crtc);
 	void (*disable_vblank)(struct drm_crtc *crtc);
 	void (*wait_for_update)(struct drm_crtc *crtc);
-	void (*cancel_pending_vblank)(struct drm_crtc *crtc, struct drm_file *file_priv);
-};
-
-struct rockchip_atomic_commit {
-	struct work_struct work;
-	struct drm_atomic_state *state;
-	struct drm_device *dev;
-	struct mutex lock;
 };
 
 struct rockchip_crtc_state {
@@ -68,11 +60,9 @@ struct rockchip_drm_private {
 	struct drm_fb_helper fbdev_helper;
 	struct drm_gem_object *fbdev_bo;
 	const struct rockchip_crtc_funcs *crtc_funcs[ROCKCHIP_MAX_CRTC];
-
-	struct rockchip_atomic_commit commit;
+	struct drm_atomic_state *state;
 };
 
-void rockchip_drm_atomic_work(struct work_struct *work);
 int rockchip_register_crtc_funcs(struct drm_crtc *crtc,
 				 const struct rockchip_crtc_funcs *crtc_funcs);
 void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index 755cfdba61cd..20f12bc5a386 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -228,87 +228,32 @@ rockchip_atomic_wait_for_complete(struct drm_device *dev, struct drm_atomic_stat
 }
 
 static void
-rockchip_atomic_commit_complete(struct rockchip_atomic_commit *commit)
+rockchip_atomic_commit_tail(struct drm_atomic_state *state)
 {
-	struct drm_atomic_state *state = commit->state;
-	struct drm_device *dev = commit->dev;
+	struct drm_device *dev = state->dev;
 
-	/*
-	 * TODO: do fence wait here.
-	 */
-
-	/*
-	 * Rockchip crtc support runtime PM, can't update display planes
-	 * when crtc is disabled.
-	 *
-	 * drm_atomic_helper_commit comments detail that:
-	 * For drivers supporting runtime PM the recommended sequence is
-	 *
-	 *     drm_atomic_helper_commit_modeset_disables(dev, state);
-	 *
-	 *     drm_atomic_helper_commit_modeset_enables(dev, state);
-	 *
-	 *     drm_atomic_helper_commit_planes(dev, state, true);
-	 *
-	 * See the kerneldoc entries for these three functions for more details.
-	 */
 	drm_atomic_helper_commit_modeset_disables(dev, state);
 
 	drm_atomic_helper_commit_modeset_enables(dev, state);
 
 	drm_atomic_helper_commit_planes(dev, state, true);
 
+	drm_atomic_helper_commit_hw_done(state);
+
 	rockchip_atomic_wait_for_complete(dev, state);
 
 	drm_atomic_helper_cleanup_planes(dev, state);
-
-	drm_atomic_state_free(state);
-}
-
-void rockchip_drm_atomic_work(struct work_struct *work)
-{
-	struct rockchip_atomic_commit *commit = container_of(work,
-			struct rockchip_atomic_commit, work);
-
-	rockchip_atomic_commit_complete(commit);
 }
 
-int rockchip_drm_atomic_commit(struct drm_device *dev,
-			       struct drm_atomic_state *state,
-			       bool nonblock)
-{
-	struct rockchip_drm_private *private = dev->dev_private;
-	struct rockchip_atomic_commit *commit = &private->commit;
-	int ret;
-
-	ret = drm_atomic_helper_prepare_planes(dev, state);
-	if (ret)
-		return ret;
-
-	/* serialize outstanding nonblocking commits */
-	mutex_lock(&commit->lock);
-	flush_work(&commit->work);
-
-	drm_atomic_helper_swap_state(dev, state);
-
-	commit->dev = dev;
-	commit->state = state;
-
-	if (nonblock)
-		schedule_work(&commit->work);
-	else
-		rockchip_atomic_commit_complete(commit);
-
-	mutex_unlock(&commit->lock);
-
-	return 0;
-}
+struct drm_mode_config_helper_funcs rockchip_mode_config_helpers = {
+	.atomic_commit_tail = rockchip_atomic_commit_tail,
+};
 
 static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {
 	.fb_create = rockchip_user_fb_create,
 	.output_poll_changed = rockchip_drm_output_poll_changed,
 	.atomic_check = drm_atomic_helper_check,
-	.atomic_commit = rockchip_drm_atomic_commit,
+	.atomic_commit = drm_atomic_helper_commit,
 };
 
 struct drm_framebuffer *
@@ -339,4 +284,5 @@ void rockchip_drm_mode_config_init(struct drm_device *dev)
 	dev->mode_config.max_height = 4096;
 
 	dev->mode_config.funcs = &rockchip_drm_mode_config_funcs;
+	dev->mode_config.helper_private = &rockchip_mode_config_helpers;
 }
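
The hand-rolled commit worker and its mutex collapse into drm_atomic_helper_commit(), which now owns state swapping and nonblocking scheduling; the driver keeps only an atomic_commit_tail to order disables before enables before plane updates (its runtime-PM constraint) and to signal drm_atomic_helper_commit_hw_done() before its private vblank wait, so the next queued commit is unblocked at the right point. Ignoring the rockchip-specific wait, a minimal commit_tail is roughly (sketch):

    static void example_commit_tail(struct drm_atomic_state *state)
    {
            struct drm_device *dev = state->dev;

            drm_atomic_helper_commit_modeset_disables(dev, state);
            drm_atomic_helper_commit_modeset_enables(dev, state);
            drm_atomic_helper_commit_planes(dev, state, true);
            drm_atomic_helper_commit_hw_done(state);  /* unblock next commit */
            drm_atomic_helper_wait_for_vblanks(dev, state);
            drm_atomic_helper_cleanup_planes(dev, state);
    }
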
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
index f261512bb4a0..207e01de6e32 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -108,7 +108,7 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
 	fbi->screen_size = rk_obj->base.size;
 	fbi->fix.smem_len = rk_obj->base.size;
 
-	DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%ld size=%d\n",
+	DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%ld size=%zu\n",
 		      fb->width, fb->height, fb->depth, rk_obj->kvaddr,
 		      offset, size);
 
@@ -156,9 +156,6 @@ int rockchip_drm_fbdev_init(struct drm_device *dev)
 		goto err_drm_fb_helper_fini;
 	}
 
-	/* disable all the possible outputs/crtcs before entering KMS mode */
-	drm_helper_disable_unused_functions(dev);
-
 	ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
 	if (ret < 0) {
 		dev_err(dev->dev, "Failed to set initial hw config - %d.\n",
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 9c2d8a894093..059e902f872d 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -38,7 +38,7 @@ static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
 					 &rk_obj->dma_addr, GFP_KERNEL,
 					 &rk_obj->dma_attrs);
 	if (!rk_obj->kvaddr) {
-		DRM_ERROR("failed to allocate %#x byte dma buffer", obj->size);
+		DRM_ERROR("failed to allocate %zu byte dma buffer", obj->size);
 		return -ENOMEM;
 	}
 
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 1c4d5b5a70a2..6255e5bcd954 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -98,7 +98,9 @@ struct vop_win {
 	const struct vop_win_data *data;
 	struct vop *vop;
 
-	struct vop_plane_state state;
+	/* protected by dev->event_lock */
+	bool enable;
+	dma_addr_t yrgb_mst;
 };
 
 struct vop {
@@ -112,6 +114,8 @@
 	bool vsync_work_pending;
 	struct completion dsp_hold_completion;
 	struct completion wait_update_complete;
+
+	/* protected by dev->event_lock */
 	struct drm_pending_vblank_event *event;
 
 	const struct vop_data *data;
@@ -431,9 +435,6 @@ static void vop_enable(struct drm_crtc *crtc)
 	struct vop *vop = to_vop(crtc);
 	int ret;
 
-	if (vop->is_enabled)
-		return;
-
 	ret = pm_runtime_get_sync(vop->dev);
 	if (ret < 0) {
 		dev_err(vop->dev, "failed to get pm runtime: %d\n", ret);
@@ -501,8 +502,7 @@ static void vop_crtc_disable(struct drm_crtc *crtc)
 	struct vop *vop = to_vop(crtc);
 	int i;
 
-	if (!vop->is_enabled)
-		return;
+	WARN_ON(vop->event);
 
 	/*
 	 * We need to make sure that all windows are disabled before we
@@ -553,6 +553,14 @@ static void vop_crtc_disable(struct drm_crtc *crtc)
 	clk_disable(vop->aclk);
 	clk_disable(vop->hclk);
 	pm_runtime_put(vop->dev);
+
+	if (crtc->state->event && !crtc->state->active) {
+		spin_lock_irq(&crtc->dev->event_lock);
+		drm_crtc_send_vblank_event(crtc, crtc->state->event);
+		spin_unlock_irq(&crtc->dev->event_lock);
+
+		crtc->state->event = NULL;
+	}
 }
 
 static void vop_plane_destroy(struct drm_plane *plane)
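
Completing crtc->state->event by hand in vop_crtc_disable() matters because a disabled CRTC generates no further vblanks: without it, a commit that turns the pipe off while carrying an event would leave userspace waiting forever. The pattern in isolation (sketch):

    /* Deliver a pending event when scanout is being turned off. */
    static void example_flush_event_on_disable(struct drm_crtc *crtc)
    {
            spin_lock_irq(&crtc->dev->event_lock);
            if (crtc->state->event) {
                    drm_crtc_send_vblank_event(crtc, crtc->state->event);
                    crtc->state->event = NULL;
            }
            spin_unlock_irq(&crtc->dev->event_lock);
    }
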
@@ -618,6 +626,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
 
 	ret = drm_plane_helper_check_update(plane, crtc, state->fb,
 					    src, dest, &clip,
+					    state->rotation,
 					    min_scale,
 					    max_scale,
 					    true, true, &visible);
@@ -658,6 +667,11 @@ static void vop_plane_atomic_disable(struct drm_plane *plane,
 	if (!old_state->crtc)
 		return;
 
+	spin_lock_irq(&plane->dev->event_lock);
+	vop_win->enable = false;
+	vop_win->yrgb_mst = 0;
+	spin_unlock_irq(&plane->dev->event_lock);
+
 	spin_lock(&vop->reg_lock);
 
 	VOP_WIN_SET(vop, win, enable, 0);
@@ -692,7 +706,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
 	/*
 	 * can't update plane when vop is disabled.
 	 */
-	if (!crtc)
+	if (WARN_ON(!crtc))
 		return;
 
 	if (WARN_ON(!vop->is_enabled))
@@ -721,6 +735,11 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
 	offset += (src->y1 >> 16) * fb->pitches[0];
 	vop_plane_state->yrgb_mst = rk_obj->dma_addr + offset + fb->offsets[0];
 
+	spin_lock_irq(&plane->dev->event_lock);
+	vop_win->enable = true;
+	vop_win->yrgb_mst = vop_plane_state->yrgb_mst;
+	spin_unlock_irq(&plane->dev->event_lock);
+
 	spin_lock(&vop->reg_lock);
 
 	VOP_WIN_SET(vop, win, format, vop_plane_state->format);
@@ -876,30 +895,10 @@ static void vop_crtc_wait_for_update(struct drm_crtc *crtc)
 	WARN_ON(!wait_for_completion_timeout(&vop->wait_update_complete, 100));
 }
 
-static void vop_crtc_cancel_pending_vblank(struct drm_crtc *crtc,
-					   struct drm_file *file_priv)
-{
-	struct drm_device *drm = crtc->dev;
-	struct vop *vop = to_vop(crtc);
-	struct drm_pending_vblank_event *e;
-	unsigned long flags;
-
-	spin_lock_irqsave(&drm->event_lock, flags);
-	e = vop->event;
-	if (e && e->base.file_priv == file_priv) {
-		vop->event = NULL;
-
-		e->base.destroy(&e->base);
-		file_priv->event_space += sizeof(e->event);
-	}
-	spin_unlock_irqrestore(&drm->event_lock, flags);
-}
-
 static const struct rockchip_crtc_funcs private_crtc_funcs = {
 	.enable_vblank = vop_crtc_enable_vblank,
 	.disable_vblank = vop_crtc_disable_vblank,
 	.wait_for_update = vop_crtc_wait_for_update,
-	.cancel_pending_vblank = vop_crtc_cancel_pending_vblank,
 };
 
 static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -931,6 +930,8 @@ static void vop_crtc_enable(struct drm_crtc *crtc)
 	u16 vact_end = vact_st + vdisplay;
 	uint32_t val;
 
+	WARN_ON(vop->event);
+
 	vop_enable(crtc);
 	/*
 	 * If dclk rate is zero, mean that scanout is stop,
@@ -1027,12 +1028,15 @@ static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
 {
 	struct vop *vop = to_vop(crtc);
 
+	spin_lock_irq(&crtc->dev->event_lock);
 	if (crtc->state->event) {
 		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+		WARN_ON(vop->event);
 
 		vop->event = crtc->state->event;
 		crtc->state->event = NULL;
 	}
+	spin_unlock_irq(&crtc->dev->event_lock);
 }
 
 static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
@@ -1080,16 +1084,14 @@ static const struct drm_crtc_funcs vop_crtc_funcs = {
 
 static bool vop_win_pending_is_complete(struct vop_win *vop_win)
 {
-	struct drm_plane *plane = &vop_win->base;
-	struct vop_plane_state *state = to_vop_plane_state(plane->state);
 	dma_addr_t yrgb_mst;
 
-	if (!state->enable)
+	if (!vop_win->enable)
 		return VOP_WIN_GET(vop_win->vop, vop_win->data, enable) == 0;
 
 	yrgb_mst = VOP_WIN_GET_YRGBADDR(vop_win->vop, vop_win->data);
 
-	return yrgb_mst == state->yrgb_mst;
+	return yrgb_mst == vop_win->yrgb_mst;
 }
 
 static void vop_handle_vblank(struct vop *vop)
@@ -1104,15 +1106,16 @@ static void vop_handle_vblank(struct vop *vop)
 		return;
 	}
 
+	spin_lock_irqsave(&drm->event_lock, flags);
 	if (vop->event) {
-		spin_lock_irqsave(&drm->event_lock, flags);
 
 		drm_crtc_send_vblank_event(crtc, vop->event);
 		drm_crtc_vblank_put(crtc);
 		vop->event = NULL;
 
-		spin_unlock_irqrestore(&drm->event_lock, flags);
 	}
+	spin_unlock_irqrestore(&drm->event_lock, flags);
+
 	if (!completion_done(&vop->wait_update_complete))
 		complete(&vop->wait_update_complete);
 }
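
Taken together, the vop hunks move every reader and writer of vop->event and of the new vop_win->enable/yrgb_mst shadows under dev->event_lock, and widen the locked region in the vblank handler so the event test and its consumption cannot race with atomic_begin arming a new event. The consumer side of that handshake, reduced to its locking shape (sketch):

    static void example_handle_vblank(struct drm_device *drm,
                                      struct drm_crtc *crtc,
                                      struct drm_pending_vblank_event **slot)
    {
            unsigned long flags;

            spin_lock_irqsave(&drm->event_lock, flags);
            if (*slot) {
                    drm_crtc_send_vblank_event(crtc, *slot);
                    drm_crtc_vblank_put(crtc);  /* balance the get at arm time */
                    *slot = NULL;
            }
            spin_unlock_irqrestore(&drm->event_lock, flags);
    }
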
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 1e154fc779d5..6547b1db460a 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -441,7 +441,7 @@ void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc)
 	scrtc->event = NULL;
 	if (event) {
 		drm_crtc_send_vblank_event(&scrtc->crtc, event);
-		drm_vblank_put(dev, 0);
+		drm_crtc_vblank_put(&scrtc->crtc);
 	}
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 }
@@ -467,7 +467,7 @@ static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc,
 
 	if (event) {
 		event->pipe = 0;
-		drm_vblank_get(dev, 0);
+		drm_crtc_vblank_get(&scrtc->crtc);
 		spin_lock_irqsave(&dev->event_lock, flags);
 		scrtc->event = event;
 		spin_unlock_irqrestore(&dev->event_lock, flags);
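
shmobile moves from the device-plus-pipe-index vblank calls to the per-CRTC variants; for this single-pipe driver the two are equivalent, but the CRTC-based form resolves the pipe internally instead of hard-coding index 0. The substitution in isolation (sketch):

    static int example_arm_vblank(struct drm_crtc *crtc)
    {
            /* was: drm_vblank_get(crtc->dev, 0); */
            return drm_crtc_vblank_get(crtc);
    }
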
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 7700ff172079..f0492603ea88 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -259,12 +259,11 @@ static struct drm_driver shmob_drm_driver = {
 			   | DRIVER_PRIME,
 	.load			= shmob_drm_load,
 	.unload			= shmob_drm_unload,
-	.set_busid		= drm_platform_set_busid,
 	.irq_handler		= shmob_drm_irq,
 	.get_vblank_counter	= drm_vblank_no_hw_counter,
 	.enable_vblank		= shmob_drm_enable_vblank,
 	.disable_vblank		= shmob_drm_disable_vblank,
-	.gem_free_object	= drm_gem_cma_free_object,
+	.gem_free_object_unlocked = drm_gem_cma_free_object,
 	.gem_vm_ops		= &drm_gem_cma_vm_ops,
 	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
 	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index 93ad8a5704d1..03defda77766 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -316,7 +316,7 @@ void sis_reclaim_buffers_locked(struct drm_device *dev,
 	struct sis_file_private *file_priv = file->driver_priv;
 	struct sis_memblock *entry, *next;
 
-	if (!(file->minor->master && file->master->lock.hw_lock))
+	if (!(dev->master && file->master->lock.hw_lock))
 		return;
 
 	drm_legacy_idlelock_take(&file->master->lock);
diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig
index 5ad43a1bb260..494ab257f77c 100644
--- a/drivers/gpu/drm/sti/Kconfig
+++ b/drivers/gpu/drm/sti/Kconfig
@@ -7,5 +7,6 @@ config DRM_STI
 	select DRM_KMS_CMA_HELPER
 	select DRM_PANEL
 	select FW_LOADER
+	select SND_SOC_HDMI_CODEC if SND_SOC
 	help
 	  Choose this option to enable DRM on STM stiH41x chipset
diff --git a/drivers/gpu/drm/sti/sti_awg_utils.c b/drivers/gpu/drm/sti/sti_awg_utils.c
index a516eb869f6f..2da7d6866d5d 100644
--- a/drivers/gpu/drm/sti/sti_awg_utils.c
+++ b/drivers/gpu/drm/sti/sti_awg_utils.c
@@ -6,6 +6,8 @@
 
 #include "sti_awg_utils.h"
 
+#define AWG_DELAY (-5)
+
 #define AWG_OPCODE_OFFSET 10
 #define AWG_MAX_ARG       0x3ff
 
@@ -125,7 +127,7 @@ static int awg_generate_line_signal(
 		val = timing->blanking_level;
 		ret |= awg_generate_instr(RPLSET, val, 0, 0, fwparams);
 
-		val = timing->trailing_pixels - 1;
+		val = timing->trailing_pixels - 1 + AWG_DELAY;
 		ret |= awg_generate_instr(SKIP, val, 0, 0, fwparams);
 	}
 
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
index 3d2fa3ab33df..794148ff0e57 100644
--- a/drivers/gpu/drm/sti/sti_compositor.c
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -55,6 +55,26 @@ struct sti_compositor_data stih416_compositor_data = {
 	},
 };
 
+int sti_compositor_debufs_init(struct sti_compositor *compo,
+			       struct drm_minor *minor)
+{
+	int ret = 0, i;
+
+	for (i = 0; compo->vid[i]; i++) {
+		ret = vid_debugfs_init(compo->vid[i], minor);
+		if (ret)
+			return ret;
+	}
+
+	for (i = 0; compo->mixer[i]; i++) {
+		ret = sti_mixer_debugfs_init(compo->mixer[i], minor);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 static int sti_compositor_bind(struct device *dev,
 			       struct device *master,
 			       void *data)
diff --git a/drivers/gpu/drm/sti/sti_compositor.h b/drivers/gpu/drm/sti/sti_compositor.h
index 1a4a73dab11e..24444ef42a98 100644
--- a/drivers/gpu/drm/sti/sti_compositor.h
+++ b/drivers/gpu/drm/sti/sti_compositor.h
@@ -81,4 +81,7 @@ struct sti_compositor {
 	struct notifier_block vtg_vblank_nb;
 };
 
+int sti_compositor_debufs_init(struct sti_compositor *compo,
+			       struct drm_minor *minor);
+
 #endif
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index 505620c7c2c8..c7d734dc3cf4 100644
--- a/drivers/gpu/drm/sti/sti_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -23,22 +23,11 @@
 static void sti_crtc_enable(struct drm_crtc *crtc)
 {
 	struct sti_mixer *mixer = to_sti_mixer(crtc);
-	struct device *dev = mixer->dev;
-	struct sti_compositor *compo = dev_get_drvdata(dev);
 
 	DRM_DEBUG_DRIVER("\n");
 
 	mixer->status = STI_MIXER_READY;
 
-	/* Prepare and enable the compo IP clock */
-	if (mixer->id == STI_MIXER_MAIN) {
-		if (clk_prepare_enable(compo->clk_compo_main))
-			DRM_INFO("Failed to prepare/enable compo_main clk\n");
-	} else {
-		if (clk_prepare_enable(compo->clk_compo_aux))
-			DRM_INFO("Failed to prepare/enable compo_aux clk\n");
-	}
-
 	drm_crtc_vblank_on(crtc);
 }
 
@@ -51,24 +40,14 @@ static void sti_crtc_disabling(struct drm_crtc *crtc)
 	mixer->status = STI_MIXER_DISABLING;
 }
 
-static bool sti_crtc_mode_fixup(struct drm_crtc *crtc,
-				const struct drm_display_mode *mode,
-				struct drm_display_mode *adjusted_mode)
-{
-	/* accept the provided drm_display_mode, do not fix it up */
-	drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
-	return true;
-}
-
 static int
 sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
 {
 	struct sti_mixer *mixer = to_sti_mixer(crtc);
 	struct device *dev = mixer->dev;
 	struct sti_compositor *compo = dev_get_drvdata(dev);
-	struct clk *clk;
+	struct clk *compo_clk, *pix_clk;
 	int rate = mode->clock * 1000;
-	int res;
 
 	DRM_DEBUG_KMS("CRTC:%d (%s) mode:%d (%s)\n",
 		      crtc->base.id, sti_mixer_to_str(mixer),
@@ -83,32 +62,46 @@ sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
 		      mode->vsync_start, mode->vsync_end,
 		      mode->vtotal, mode->type, mode->flags);
 
-	/* Set rate and prepare/enable pixel clock */
-	if (mixer->id == STI_MIXER_MAIN)
-		clk = compo->clk_pix_main;
-	else
-		clk = compo->clk_pix_aux;
+	if (mixer->id == STI_MIXER_MAIN) {
+		compo_clk = compo->clk_compo_main;
+		pix_clk = compo->clk_pix_main;
+	} else {
+		compo_clk = compo->clk_compo_aux;
+		pix_clk = compo->clk_pix_aux;
+	}
+
+	/* Prepare and enable the compo IP clock */
+	if (clk_prepare_enable(compo_clk)) {
+		DRM_INFO("Failed to prepare/enable compositor clk\n");
+		goto compo_error;
+	}
 
-	res = clk_set_rate(clk, rate);
-	if (res < 0) {
+	/* Set rate and prepare/enable pixel clock */
+	if (clk_set_rate(pix_clk, rate) < 0) {
 		DRM_ERROR("Cannot set rate (%dHz) for pix clk\n", rate);
-		return -EINVAL;
+		goto pix_error;
 	}
-	if (clk_prepare_enable(clk)) {
+	if (clk_prepare_enable(pix_clk)) {
 		DRM_ERROR("Failed to prepare/enable pix clk\n");
-		return -EINVAL;
+		goto pix_error;
 	}
 
 	sti_vtg_set_config(mixer->id == STI_MIXER_MAIN ?
 			compo->vtg_main : compo->vtg_aux, &crtc->mode);
 
-	res = sti_mixer_active_video_area(mixer, &crtc->mode);
-	if (res) {
+	if (sti_mixer_active_video_area(mixer, &crtc->mode)) {
 		DRM_ERROR("Can't set active video area\n");
-		return -EINVAL;
+		goto mixer_error;
 	}
 
-	return res;
+	return 0;
+
+mixer_error:
+	clk_disable_unprepare(pix_clk);
+pix_error:
+	clk_disable_unprepare(compo_clk);
+compo_error:
+	return -EINVAL;
 }
 
 static void sti_crtc_disable(struct drm_crtc *crtc)
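
sti_crtc_mode_set() now owns both the compositor and pixel clocks and unwinds them through a goto ladder, each label undoing exactly the acquisitions that preceded the failure, in reverse order. The idiom, reduced (sketch):

    static int example_enable_two_clocks(struct clk *a, struct clk *b)
    {
            if (clk_prepare_enable(a))
                    goto err_a;
            if (clk_prepare_enable(b))
                    goto err_b;
            return 0;

    err_b:
            clk_disable_unprepare(a);  /* undo only what succeeded */
    err_a:
            return -EINVAL;
    }
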
@@ -139,7 +132,6 @@ static void sti_crtc_disable(struct drm_crtc *crtc)
 static void
 sti_crtc_mode_set_nofb(struct drm_crtc *crtc)
 {
-	sti_crtc_enable(crtc);
 	sti_crtc_mode_set(crtc, &crtc->state->adjusted_mode);
 }
 
@@ -230,10 +222,7 @@ static void sti_crtc_atomic_flush(struct drm_crtc *crtc,
 static const struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
 	.enable = sti_crtc_enable,
 	.disable = sti_crtc_disabling,
-	.mode_fixup = sti_crtc_mode_fixup,
-	.mode_set = drm_helper_crtc_mode_set,
 	.mode_set_nofb = sti_crtc_mode_set_nofb,
-	.mode_set_base = drm_helper_crtc_mode_set_base,
 	.atomic_begin = sti_crtc_atomic_begin,
 	.atomic_flush = sti_crtc_atomic_flush,
 };
@@ -341,6 +330,17 @@ void sti_crtc_disable_vblank(struct drm_device *drm_dev, unsigned int pipe)
 	}
 }
 
+static int sti_crtc_late_register(struct drm_crtc *crtc)
+{
+	struct sti_mixer *mixer = to_sti_mixer(crtc);
+	struct sti_compositor *compo = dev_get_drvdata(mixer->dev);
+
+	if (drm_crtc_index(crtc) == 0)
+		return sti_compositor_debufs_init(compo, crtc->dev->primary);
+
+	return 0;
+}
+
 static const struct drm_crtc_funcs sti_crtc_funcs = {
 	.set_config = drm_atomic_helper_set_config,
 	.page_flip = drm_atomic_helper_page_flip,
@@ -349,6 +349,7 @@ static const struct drm_crtc_funcs sti_crtc_funcs = {
 	.reset = drm_atomic_helper_crtc_reset,
 	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
 	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+	.late_register = sti_crtc_late_register,
 };
 
 bool sti_crtc_is_main(struct drm_crtc *crtc)
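
Across the sti hunks, debugfs setup migrates from object-creation time into the new .late_register hooks (here on the CRTC, below on the cursor plane and DVO connector), which the core invokes once drm_dev_register() has created the minor's debugfs directory; registering earlier either raced with or simply predated that directory. The hook's general shape (sketch; example_debugfs_init stands in for the driver's own init):

    static int example_crtc_late_register(struct drm_crtc *crtc)
    {
            /* crtc->dev->primary is the minor whose debugfs root is now live */
            return example_debugfs_init(crtc->dev->primary);
    }
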
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index 4e990299735c..a263bbba4119 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -105,12 +105,6 @@ static int cursor_dbg_show(struct seq_file *s, void *data)
 {
 	struct drm_info_node *node = s->private;
 	struct sti_cursor *cursor = (struct sti_cursor *)node->info_ent->data;
-	struct drm_device *dev = node->minor->dev;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
 
 	seq_printf(s, "%s: (vaddr = 0x%p)",
 		   sti_plane_to_str(&cursor->plane), cursor->regs);
@@ -129,7 +123,6 @@ static int cursor_dbg_show(struct seq_file *s, void *data)
 	DBGFS_DUMP(CUR_AWE);
 	seq_puts(s, "\n");
 
-	mutex_unlock(&dev->struct_mutex);
 	return 0;
 }
 
@@ -336,6 +329,33 @@ static const struct drm_plane_helper_funcs sti_cursor_helpers_funcs = {
 	.atomic_disable = sti_cursor_atomic_disable,
 };
 
+static void sti_cursor_destroy(struct drm_plane *drm_plane)
+{
+	DRM_DEBUG_DRIVER("\n");
+
+	drm_plane_helper_disable(drm_plane);
+	drm_plane_cleanup(drm_plane);
+}
+
+static int sti_cursor_late_register(struct drm_plane *drm_plane)
+{
+	struct sti_plane *plane = to_sti_plane(drm_plane);
+	struct sti_cursor *cursor = to_sti_cursor(plane);
+
+	return cursor_debugfs_init(cursor, drm_plane->dev->primary);
+}
+
+struct drm_plane_funcs sti_cursor_plane_helpers_funcs = {
+	.update_plane = drm_atomic_helper_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	.destroy = sti_cursor_destroy,
+	.set_property = sti_plane_set_property,
+	.reset = drm_atomic_helper_plane_reset,
+	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+	.late_register = sti_cursor_late_register,
+};
+
 struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
 				    struct device *dev, int desc,
 				    void __iomem *baseaddr,
@@ -370,7 +390,7 @@ struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
 
 	res = drm_universal_plane_init(drm_dev, &cursor->plane.drm_plane,
 				       possible_crtcs,
-				       &sti_plane_helpers_funcs,
+				       &sti_cursor_plane_helpers_funcs,
 				       cursor_supported_formats,
 				       ARRAY_SIZE(cursor_supported_formats),
 				       DRM_PLANE_TYPE_CURSOR, NULL);
@@ -384,9 +404,6 @@ struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
 
 	sti_plane_init_property(&cursor->plane, DRM_PLANE_TYPE_CURSOR);
 
-	if (cursor_debugfs_init(cursor, drm_dev->primary))
-		DRM_ERROR("CURSOR debugfs setup failed\n");
-
 	return &cursor->plane.drm_plane;
 
 err_plane:
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 872495e72294..96bd3d08b2d4 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -72,11 +72,6 @@ static int sti_drm_fps_dbg_show(struct seq_file *s, void *data)
 	struct drm_info_node *node = s->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_plane *p;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
 
 	list_for_each_entry(p, &dev->mode_config.plane_list, head) {
 		struct sti_plane *plane = to_sti_plane(p);
@@ -86,7 +81,6 @@ static int sti_drm_fps_dbg_show(struct seq_file *s, void *data)
 			 plane->fps_info.fips_str);
 	}
 
-	mutex_unlock(&dev->struct_mutex);
 	return 0;
 }
 
@@ -221,7 +215,7 @@ static int sti_atomic_commit(struct drm_device *drm,
 	 * the software side now.
 	 */
 
-	drm_atomic_helper_swap_state(drm, state);
+	drm_atomic_helper_swap_state(state, true);
 
 	if (nonblock)
 		sti_atomic_schedule(private, state);
@@ -232,8 +226,28 @@ static int sti_atomic_commit(struct drm_device *drm,
 	return 0;
 }
 
+static void sti_output_poll_changed(struct drm_device *ddev)
+{
+	struct sti_private *private = ddev->dev_private;
+
+	if (!ddev->mode_config.num_connector)
+		return;
+
+	if (private->fbdev) {
+		drm_fbdev_cma_hotplug_event(private->fbdev);
+		return;
+	}
+
+	private->fbdev = drm_fbdev_cma_init(ddev, 32,
+					    ddev->mode_config.num_crtc,
+					    ddev->mode_config.num_connector);
+	if (IS_ERR(private->fbdev))
+		private->fbdev = NULL;
+}
+
 static const struct drm_mode_config_funcs sti_mode_config_funcs = {
 	.fb_create = drm_fb_cma_create,
+	.output_poll_changed = sti_output_poll_changed,
 	.atomic_check = drm_atomic_helper_check,
 	.atomic_commit = sti_atomic_commit,
 };
@@ -254,45 +268,6 @@ static void sti_mode_config_init(struct drm_device *dev)
 	dev->mode_config.funcs = &sti_mode_config_funcs;
 }
 
-static int sti_load(struct drm_device *dev, unsigned long flags)
-{
-	struct sti_private *private;
-	int ret;
-
-	private = kzalloc(sizeof(*private), GFP_KERNEL);
-	if (!private) {
-		DRM_ERROR("Failed to allocate private\n");
-		return -ENOMEM;
-	}
-	dev->dev_private = (void *)private;
-	private->drm_dev = dev;
-
-	mutex_init(&private->commit.lock);
-	INIT_WORK(&private->commit.work, sti_atomic_work);
-
-	drm_mode_config_init(dev);
-	drm_kms_helper_poll_init(dev);
-
-	sti_mode_config_init(dev);
-
-	ret = component_bind_all(dev->dev, dev);
-	if (ret) {
-		drm_kms_helper_poll_fini(dev);
-		drm_mode_config_cleanup(dev);
-		kfree(private);
-		return ret;
-	}
-
-	drm_mode_config_reset(dev);
-
-	drm_helper_disable_unused_functions(dev);
-	drm_fbdev_cma_init(dev, 32,
-			   dev->mode_config.num_crtc,
-			   dev->mode_config.num_connector);
-
-	return 0;
-}
-
 static const struct file_operations sti_driver_fops = {
 	.owner = THIS_MODULE,
 	.open = drm_open,
@@ -309,8 +284,7 @@ static const struct file_operations sti_driver_fops = {
 static struct drm_driver sti_driver = {
 	.driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
 	    DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
-	.load = sti_load,
-	.gem_free_object = drm_gem_cma_free_object,
+	.gem_free_object_unlocked = drm_gem_cma_free_object,
 	.gem_vm_ops = &drm_gem_cma_vm_ops,
 	.dumb_create = drm_gem_cma_dumb_create,
 	.dumb_map_offset = drm_gem_cma_dumb_map_offset,
@@ -346,14 +320,88 @@ static int compare_of(struct device *dev, void *data)
 	return dev->of_node == data;
 }
 
+static int sti_init(struct drm_device *ddev)
+{
+	struct sti_private *private;
+
+	private = kzalloc(sizeof(*private), GFP_KERNEL);
+	if (!private)
+		return -ENOMEM;
+
+	ddev->dev_private = (void *)private;
+	dev_set_drvdata(ddev->dev, ddev);
+	private->drm_dev = ddev;
+
+	mutex_init(&private->commit.lock);
+	INIT_WORK(&private->commit.work, sti_atomic_work);
+
+	drm_mode_config_init(ddev);
+
+	sti_mode_config_init(ddev);
+
+	drm_kms_helper_poll_init(ddev);
+
+	return 0;
+}
+
+static void sti_cleanup(struct drm_device *ddev)
+{
+	struct sti_private *private = ddev->dev_private;
+
+	if (private->fbdev) {
+		drm_fbdev_cma_fini(private->fbdev);
+		private->fbdev = NULL;
+	}
+
+	drm_kms_helper_poll_fini(ddev);
+	drm_vblank_cleanup(ddev);
+	kfree(private);
+	ddev->dev_private = NULL;
+}
+
 static int sti_bind(struct device *dev)
 {
-	return drm_platform_init(&sti_driver, to_platform_device(dev));
+	struct drm_device *ddev;
+	int ret;
+
+	ddev = drm_dev_alloc(&sti_driver, dev);
+	if (!ddev)
+		return -ENOMEM;
+
+	ddev->platformdev = to_platform_device(dev);
+
+	ret = sti_init(ddev);
+	if (ret)
+		goto err_drm_dev_unref;
+
+	ret = component_bind_all(ddev->dev, ddev);
+	if (ret)
+		goto err_cleanup;
+
+	ret = drm_dev_register(ddev, 0);
+	if (ret)
+		goto err_register;
+
+	drm_mode_config_reset(ddev);
+
+	return 0;
+
+err_register:
+	drm_mode_config_cleanup(ddev);
+err_cleanup:
+	sti_cleanup(ddev);
+err_drm_dev_unref:
+	drm_dev_unref(ddev);
+	return ret;
 }
 
 static void sti_unbind(struct device *dev)
 {
-	drm_put_dev(dev_get_drvdata(dev));
+	struct drm_device *ddev = dev_get_drvdata(dev);
+
+	drm_dev_unregister(ddev);
+	sti_cleanup(ddev);
+	drm_dev_unref(ddev);
 }
 
 static const struct component_master_ops sti_ops = {
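
For context: the sti_bind() rework above replaces the deprecated drm_driver.load()/drm_platform_init() midlayer with explicit drm_dev_alloc() and drm_dev_register() calls, so the device only becomes visible to userspace once it is fully initialised, and unbind tears things down in the reverse order. A minimal sketch of the same pattern, assuming hypothetical foo_* helpers (error unwinding trimmed to the essentials):

	static int foo_bind(struct device *dev)
	{
		struct drm_device *ddev;
		int ret;

		/* Allocate the device; it is not yet visible to userspace */
		ddev = drm_dev_alloc(&foo_driver, dev);
		if (!ddev)
			return -ENOMEM;

		ret = foo_init(ddev);		/* mode config, planes, ... */
		if (ret)
			goto err_unref;

		/* Publish /dev/dri/cardN only now, when init is complete */
		ret = drm_dev_register(ddev, 0);
		if (ret)
			goto err_cleanup;

		return 0;

	err_cleanup:
		foo_cleanup(ddev);
	err_unref:
		drm_dev_unref(ddev);
		return ret;
	}
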
diff --git a/drivers/gpu/drm/sti/sti_drv.h b/drivers/gpu/drm/sti/sti_drv.h
index 30ddc20841c3..78ebe5e30f53 100644
--- a/drivers/gpu/drm/sti/sti_drv.h
+++ b/drivers/gpu/drm/sti/sti_drv.h
@@ -24,6 +24,7 @@ struct sti_private {
 	struct sti_compositor *compo;
 	struct drm_property *plane_zorder_property;
 	struct drm_device *drm_dev;
+	struct drm_fbdev_cma *fbdev;
 
 	struct {
 		struct drm_atomic_state *state;
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index 25f76632002c..ec3108074350 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -177,12 +177,6 @@ static int dvo_dbg_show(struct seq_file *s, void *data)
 {
 	struct drm_info_node *node = s->private;
 	struct sti_dvo *dvo = (struct sti_dvo *)node->info_ent->data;
-	struct drm_device *dev = node->minor->dev;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
 
 	seq_printf(s, "DVO: (vaddr = 0x%p)", dvo->regs);
 	DBGFS_DUMP(DVO_AWG_DIGSYNC_CTRL);
@@ -193,7 +187,6 @@ static int dvo_dbg_show(struct seq_file *s, void *data)
 	dvo_dbg_awg_microcode(s, dvo->regs + DVO_DIGSYNC_INSTR_I);
 	seq_puts(s, "\n");
 
-	mutex_unlock(&dev->struct_mutex);
 	return 0;
 }
 
@@ -384,20 +377,10 @@ static int sti_dvo_connector_mode_valid(struct drm_connector *connector,
 	return MODE_OK;
 }
 
-struct drm_encoder *sti_dvo_best_encoder(struct drm_connector *connector)
-{
-	struct sti_dvo_connector *dvo_connector
-		= to_sti_dvo_connector(connector);
-
-	/* Best encoder is the one associated during connector creation */
-	return dvo_connector->encoder;
-}
-
 static const
 struct drm_connector_helper_funcs sti_dvo_connector_helper_funcs = {
 	.get_modes = sti_dvo_connector_get_modes,
 	.mode_valid = sti_dvo_connector_mode_valid,
-	.best_encoder = sti_dvo_best_encoder,
 };
 
 static enum drm_connector_status
@@ -421,24 +404,29 @@ sti_dvo_connector_detect(struct drm_connector *connector, bool force)
 	return connector_status_disconnected;
 }
 
-static void sti_dvo_connector_destroy(struct drm_connector *connector)
+static int sti_dvo_late_register(struct drm_connector *connector)
 {
 	struct sti_dvo_connector *dvo_connector
 		= to_sti_dvo_connector(connector);
+	struct sti_dvo *dvo = dvo_connector->dvo;
 
-	drm_connector_unregister(connector);
-	drm_connector_cleanup(connector);
-	kfree(dvo_connector);
+	if (dvo_debugfs_init(dvo, dvo->drm_dev->primary)) {
+		DRM_ERROR("DVO debugfs setup failed\n");
+		return -EINVAL;
+	}
+
+	return 0;
 }
 
 static const struct drm_connector_funcs sti_dvo_connector_funcs = {
 	.dpms = drm_atomic_helper_connector_dpms,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.detect = sti_dvo_connector_detect,
-	.destroy = sti_dvo_connector_destroy,
+	.destroy = drm_connector_cleanup,
 	.reset = drm_atomic_helper_connector_reset,
 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+	.late_register = sti_dvo_late_register,
 };
 
 static struct drm_encoder *sti_dvo_find_encoder(struct drm_device *dev)
@@ -509,26 +497,16 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data)
 	drm_connector_helper_add(drm_connector,
 				 &sti_dvo_connector_helper_funcs);
 
-	err = drm_connector_register(drm_connector);
-	if (err)
-		goto err_connector;
-
 	err = drm_mode_connector_attach_encoder(drm_connector, encoder);
 	if (err) {
 		DRM_ERROR("Failed to attach a connector to a encoder\n");
 		goto err_sysfs;
 	}
 
-	if (dvo_debugfs_init(dvo, drm_dev->primary))
-		DRM_ERROR("DVO debugfs setup failed\n");
-
 	return 0;
 
 err_sysfs:
-	drm_connector_unregister(drm_connector);
-err_connector:
 	drm_bridge_remove(bridge);
-	drm_connector_cleanup(drm_connector);
 	return -EINVAL;
 }
 
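
The connector changes above follow a series-wide theme: debugfs entries live under the DRM minor's directory, which only exists while the device is registered, and drm_dev_register() now registers all connectors itself, which is why the manual drm_connector_register() call and the custom destroy callback can go. Moving debugfs setup into the new .late_register hook (and, where needed, teardown into .early_unregister) guarantees the ordering. A sketch of the hook pair, with hypothetical foo_* names:

	static int foo_connector_late_register(struct drm_connector *connector)
	{
		/* Called from drm_dev_register(): the debugfs minor exists */
		return foo_debugfs_init(connector->dev->primary);
	}

	static void foo_connector_early_unregister(struct drm_connector *connector)
	{
		/* Called from drm_dev_unregister(), before the minor goes away */
		foo_debugfs_exit(connector->dev->primary);
	}
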
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index ff33c38da197..bf63086a3dc8 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -208,14 +208,8 @@ static int gdp_dbg_show(struct seq_file *s, void *data)
 {
 	struct drm_info_node *node = s->private;
 	struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data;
-	struct drm_device *dev = node->minor->dev;
 	struct drm_plane *drm_plane = &gdp->plane.drm_plane;
 	struct drm_crtc *crtc = drm_plane->crtc;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
 
 	seq_printf(s, "%s: (vaddr = 0x%p)",
 		   sti_plane_to_str(&gdp->plane), gdp->regs);
@@ -248,7 +242,6 @@ static int gdp_dbg_show(struct seq_file *s, void *data)
 	seq_printf(s, "  Connected to DRM CRTC #%d (%s)\n",
 		   crtc->base.id, sti_mixer_to_str(to_sti_mixer(crtc)));
 
-	mutex_unlock(&dev->struct_mutex);
 	return 0;
 }
 
@@ -279,13 +272,7 @@ static int gdp_node_dbg_show(struct seq_file *s, void *arg)
 {
 	struct drm_info_node *node = s->private;
 	struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data;
-	struct drm_device *dev = node->minor->dev;
 	unsigned int b;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
 
 	for (b = 0; b < GDP_NODE_NB_BANK; b++) {
 		seq_printf(s, "\n%s[%d].top", sti_plane_to_str(&gdp->plane), b);
@@ -294,7 +281,6 @@ static int gdp_node_dbg_show(struct seq_file *s, void *arg)
 		gdp_node_dump_node(s, gdp->node_list[b].btm_field);
 	}
 
-	mutex_unlock(&dev->struct_mutex);
 	return 0;
 }
 
@@ -880,6 +866,33 @@ static const struct drm_plane_helper_funcs sti_gdp_helpers_funcs = {
 	.atomic_disable = sti_gdp_atomic_disable,
 };
 
+static void sti_gdp_destroy(struct drm_plane *drm_plane)
+{
+	DRM_DEBUG_DRIVER("\n");
+
+	drm_plane_helper_disable(drm_plane);
+	drm_plane_cleanup(drm_plane);
+}
+
+static int sti_gdp_late_register(struct drm_plane *drm_plane)
+{
+	struct sti_plane *plane = to_sti_plane(drm_plane);
+	struct sti_gdp *gdp = to_sti_gdp(plane);
+
+	return gdp_debugfs_init(gdp, drm_plane->dev->primary);
+}
+
+struct drm_plane_funcs sti_gdp_plane_helpers_funcs = {
+	.update_plane = drm_atomic_helper_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	.destroy = sti_gdp_destroy,
+	.set_property = sti_plane_set_property,
+	.reset = drm_atomic_helper_plane_reset,
+	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+	.late_register = sti_gdp_late_register,
+};
+
 struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
 				 struct device *dev, int desc,
 				 void __iomem *baseaddr,
@@ -906,7 +919,7 @@ struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
 
 	res = drm_universal_plane_init(drm_dev, &gdp->plane.drm_plane,
 				       possible_crtcs,
-				       &sti_plane_helpers_funcs,
+				       &sti_gdp_plane_helpers_funcs,
 				       gdp_supported_formats,
 				       ARRAY_SIZE(gdp_supported_formats),
 				       type, NULL);
@@ -919,9 +932,6 @@ struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
 
 	sti_plane_init_property(&gdp->plane, type);
 
-	if (gdp_debugfs_init(gdp, drm_dev->primary))
-		DRM_ERROR("GDP debugfs setup failed\n");
-
 	return &gdp->plane.drm_plane;
 
 err:
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index f7d3464cdf09..8505569f75de 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -376,12 +376,6 @@ static int hda_dbg_show(struct seq_file *s, void *data)
 {
 	struct drm_info_node *node = s->private;
 	struct sti_hda *hda = (struct sti_hda *)node->info_ent->data;
-	struct drm_device *dev = node->minor->dev;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
 
 	seq_printf(s, "HD Analog: (vaddr = 0x%p)", hda->regs);
 	DBGFS_DUMP(HDA_ANA_CFG);
@@ -397,7 +391,6 @@ static int hda_dbg_show(struct seq_file *s, void *data)
 	hda_dbg_video_dacs_ctrl(s, hda->video_dacs_ctrl);
 	seq_puts(s, "\n");
 
-	mutex_unlock(&dev->struct_mutex);
 	return 0;
 }
 
@@ -676,20 +669,10 @@ static int sti_hda_connector_mode_valid(struct drm_connector *connector,
 	return MODE_OK;
 }
 
-struct drm_encoder *sti_hda_best_encoder(struct drm_connector *connector)
-{
-	struct sti_hda_connector *hda_connector
-		= to_sti_hda_connector(connector);
-
-	/* Best encoder is the one associated during connector creation */
-	return hda_connector->encoder;
-}
-
 static const
 struct drm_connector_helper_funcs sti_hda_connector_helper_funcs = {
 	.get_modes = sti_hda_connector_get_modes,
 	.mode_valid = sti_hda_connector_mode_valid,
-	.best_encoder = sti_hda_best_encoder,
 };
 
 static enum drm_connector_status
@@ -698,24 +681,29 @@ sti_hda_connector_detect(struct drm_connector *connector, bool force)
 	return connector_status_connected;
 }
 
-static void sti_hda_connector_destroy(struct drm_connector *connector)
+static int sti_hda_late_register(struct drm_connector *connector)
 {
 	struct sti_hda_connector *hda_connector
 		= to_sti_hda_connector(connector);
+	struct sti_hda *hda = hda_connector->hda;
+
+	if (hda_debugfs_init(hda, hda->drm_dev->primary)) {
+		DRM_ERROR("HDA debugfs setup failed\n");
+		return -EINVAL;
+	}
 
-	drm_connector_unregister(connector);
-	drm_connector_cleanup(connector);
-	kfree(hda_connector);
+	return 0;
 }
 
 static const struct drm_connector_funcs sti_hda_connector_funcs = {
 	.dpms = drm_atomic_helper_connector_dpms,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.detect = sti_hda_connector_detect,
-	.destroy = sti_hda_connector_destroy,
+	.destroy = drm_connector_cleanup,
 	.reset = drm_atomic_helper_connector_reset,
 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+	.late_register = sti_hda_late_register,
 };
 
 static struct drm_encoder *sti_hda_find_encoder(struct drm_device *dev)
@@ -773,10 +761,6 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data)
 	drm_connector_helper_add(drm_connector,
 				 &sti_hda_connector_helper_funcs);
 
-	err = drm_connector_register(drm_connector);
-	if (err)
-		goto err_connector;
-
 	err = drm_mode_connector_attach_encoder(drm_connector, encoder);
 	if (err) {
 		DRM_ERROR("Failed to attach a connector to a encoder\n");
@@ -786,15 +770,10 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data)
 	/* force to disable hd dacs at startup */
 	hda_enable_hd_dacs(hda, false);
 
-	if (hda_debugfs_init(hda, drm_dev->primary))
-		DRM_ERROR("HDA debugfs setup failed\n");
-
 	return 0;
 
 err_sysfs:
-	drm_connector_unregister(drm_connector);
-err_connector:
-	drm_connector_cleanup(drm_connector);
+	drm_bridge_remove(bridge);
 	return -EINVAL;
 }
 
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 6ef0715bd5b9..fedc17f98d9b 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -18,6 +18,8 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
 
+#include <sound/hdmi-codec.h>
+
 #include "sti_hdmi.h"
 #include "sti_hdmi_tx3g4c28phy.h"
 #include "sti_hdmi_tx3g0c55phy.h"
@@ -35,6 +37,8 @@
 #define HDMI_DFLT_CHL0_DAT 0x0110
 #define HDMI_DFLT_CHL1_DAT 0x0114
 #define HDMI_DFLT_CHL2_DAT 0x0118
+#define HDMI_AUDIO_CFG 0x0200
+#define HDMI_SPDIF_FIFO_STATUS 0x0204
 #define HDMI_SW_DI_1_HEAD_WORD 0x0210
 #define HDMI_SW_DI_1_PKT_WORD0 0x0214
 #define HDMI_SW_DI_1_PKT_WORD1 0x0218
@@ -44,6 +48,9 @@
 #define HDMI_SW_DI_1_PKT_WORD5 0x0228
 #define HDMI_SW_DI_1_PKT_WORD6 0x022C
 #define HDMI_SW_DI_CFG 0x0230
+#define HDMI_SAMPLE_FLAT_MASK 0x0244
+#define HDMI_AUDN 0x0400
+#define HDMI_AUD_CTS 0x0404
 #define HDMI_SW_DI_2_HEAD_WORD 0x0600
 #define HDMI_SW_DI_2_PKT_WORD0 0x0604
 #define HDMI_SW_DI_2_PKT_WORD1 0x0608
@@ -103,6 +110,7 @@
 #define HDMI_INT_DLL_LCK BIT(5)
 #define HDMI_INT_NEW_FRAME BIT(6)
 #define HDMI_INT_GENCTRL_PKT BIT(7)
+#define HDMI_INT_AUDIO_FIFO_XRUN BIT(8)
 #define HDMI_INT_SINK_TERM_PRESENT BIT(11)
 
 #define HDMI_DEFAULT_INT (HDMI_INT_SINK_TERM_PRESENT \
@@ -111,6 +119,7 @@
 			| HDMI_INT_GLOBAL)
 
 #define HDMI_WORKING_INT (HDMI_INT_SINK_TERM_PRESENT \
+			| HDMI_INT_AUDIO_FIFO_XRUN \
 			| HDMI_INT_GENCTRL_PKT \
 			| HDMI_INT_NEW_FRAME \
 			| HDMI_INT_DLL_LCK \
@@ -121,6 +130,27 @@
 
 #define HDMI_STA_SW_RST BIT(1)
 
+#define HDMI_AUD_CFG_8CH BIT(0)
+#define HDMI_AUD_CFG_SPDIF_DIV_2 BIT(1)
+#define HDMI_AUD_CFG_SPDIF_DIV_3 BIT(2)
+#define HDMI_AUD_CFG_SPDIF_CLK_DIV_4 (BIT(1) | BIT(2))
+#define HDMI_AUD_CFG_CTS_CLK_256FS BIT(12)
+#define HDMI_AUD_CFG_DTS_INVALID BIT(16)
+#define HDMI_AUD_CFG_ONE_BIT_INVALID (BIT(18) | BIT(19) | BIT(20) | BIT(21))
+#define HDMI_AUD_CFG_CH12_VALID BIT(28)
+#define HDMI_AUD_CFG_CH34_VALID BIT(29)
+#define HDMI_AUD_CFG_CH56_VALID BIT(30)
+#define HDMI_AUD_CFG_CH78_VALID BIT(31)
+
+/* sample flat mask */
+#define HDMI_SAMPLE_FLAT_NO 0
+#define HDMI_SAMPLE_FLAT_SP0 BIT(0)
+#define HDMI_SAMPLE_FLAT_SP1 BIT(1)
+#define HDMI_SAMPLE_FLAT_SP2 BIT(2)
+#define HDMI_SAMPLE_FLAT_SP3 BIT(3)
+#define HDMI_SAMPLE_FLAT_ALL (HDMI_SAMPLE_FLAT_SP0 | HDMI_SAMPLE_FLAT_SP1 |\
+			      HDMI_SAMPLE_FLAT_SP2 | HDMI_SAMPLE_FLAT_SP3)
+
 #define HDMI_INFOFRAME_HEADER_TYPE(x) (((x) & 0xff) << 0)
 #define HDMI_INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) << 8)
 #define HDMI_INFOFRAME_HEADER_LEN(x) (((x) & 0x0f) << 16)
@@ -171,6 +201,10 @@ static irqreturn_t hdmi_irq_thread(int irq, void *arg)
 		wake_up_interruptible(&hdmi->wait_event);
 	}
 
+	/* Audio FIFO underrun IRQ */
+	if (hdmi->irq_status & HDMI_INT_AUDIO_FIFO_XRUN)
+		DRM_INFO("Warning: audio FIFO underrun occurred!\n");
+
 	return IRQ_HANDLED;
 }
 
@@ -441,26 +475,29 @@ static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi)
  */
 static int hdmi_audio_infoframe_config(struct sti_hdmi *hdmi)
 {
-	struct hdmi_audio_infoframe infofame;
+	struct hdmi_audio_params *audio = &hdmi->audio;
 	u8 buffer[HDMI_INFOFRAME_SIZE(AUDIO)];
-	int ret;
+	int ret, val;
 
-	ret = hdmi_audio_infoframe_init(&infofame);
-	if (ret < 0) {
-		DRM_ERROR("failed to setup audio infoframe: %d\n", ret);
-		return ret;
-	}
-
-	infofame.channels = 2;
-
-	ret = hdmi_audio_infoframe_pack(&infofame, buffer, sizeof(buffer));
-	if (ret < 0) {
-		DRM_ERROR("failed to pack audio infoframe: %d\n", ret);
-		return ret;
+	DRM_DEBUG_DRIVER("enter %s, AIF %s\n", __func__,
+			 audio->enabled ? "enable" : "disable");
+	if (audio->enabled) {
+		/* Set the stored audio parameters */
+		ret = hdmi_audio_infoframe_pack(&audio->cea, buffer,
+						sizeof(buffer));
+		if (ret < 0) {
+			DRM_ERROR("failed to pack audio infoframe: %d\n", ret);
+			return ret;
+		}
+		hdmi_infoframe_write_infopack(hdmi, buffer, ret);
+	} else {
+		/* Disable audio infoframe transmission */
+		val = hdmi_read(hdmi, HDMI_SW_DI_CFG);
+		val &= ~HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK,
+					     HDMI_IFRAME_SLOT_AUDIO);
+		hdmi_write(hdmi, val, HDMI_SW_DI_CFG);
 	}
 
-	hdmi_infoframe_write_infopack(hdmi, buffer, ret);
-
 	return 0;
 }
 
@@ -628,12 +665,6 @@ static int hdmi_dbg_show(struct seq_file *s, void *data)
 {
 	struct drm_info_node *node = s->private;
 	struct sti_hdmi *hdmi = (struct sti_hdmi *)node->info_ent->data;
-	struct drm_device *dev = node->minor->dev;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
 
 	seq_printf(s, "HDMI: (vaddr = 0x%p)", hdmi->regs);
 	DBGFS_DUMP("\n", HDMI_CFG);
@@ -656,6 +687,10 @@ static int hdmi_dbg_show(struct seq_file *s, void *data)
 	DBGFS_DUMP("", HDMI_SW_DI_CFG);
 	hdmi_dbg_sw_di_cfg(s, hdmi_read(hdmi, HDMI_SW_DI_CFG));
 
+	DBGFS_DUMP("\n", HDMI_AUDIO_CFG);
+	DBGFS_DUMP("\n", HDMI_SPDIF_FIFO_STATUS);
+	DBGFS_DUMP("\n", HDMI_AUDN);
+
 	seq_printf(s, "\n AVI Infoframe (Data Island slot N=%d):",
 		   HDMI_IFRAME_SLOT_AVI);
 	DBGFS_DUMP_DI(HDMI_SW_DI_N_HEAD_WORD, HDMI_IFRAME_SLOT_AVI);
@@ -690,7 +725,6 @@ static int hdmi_dbg_show(struct seq_file *s, void *data)
 	DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD6, HDMI_IFRAME_SLOT_VENDOR);
 	seq_puts(s, "\n");
 
-	mutex_unlock(&dev->struct_mutex);
 	return 0;
 }
 
@@ -861,6 +895,7 @@ static int sti_hdmi_connector_get_modes(struct drm_connector *connector)
 
 	count = drm_add_edid_modes(connector, edid);
 	drm_mode_connector_update_edid_property(connector, edid);
+	drm_edid_to_eld(connector, edid);
 
 	kfree(edid);
 	return count;
@@ -897,20 +932,10 @@ static int sti_hdmi_connector_mode_valid(struct drm_connector *connector,
 	return MODE_OK;
 }
 
-struct drm_encoder *sti_hdmi_best_encoder(struct drm_connector *connector)
-{
-	struct sti_hdmi_connector *hdmi_connector
-		= to_sti_hdmi_connector(connector);
-
-	/* Best encoder is the one associated during connector creation */
-	return hdmi_connector->encoder;
-}
-
 static const
 struct drm_connector_helper_funcs sti_hdmi_connector_helper_funcs = {
 	.get_modes = sti_hdmi_connector_get_modes,
 	.mode_valid = sti_hdmi_connector_mode_valid,
-	.best_encoder = sti_hdmi_best_encoder,
 };
 
 /* get detection status of display device */
@@ -932,16 +957,6 @@ sti_hdmi_connector_detect(struct drm_connector *connector, bool force)
 	return connector_status_disconnected;
 }
 
-static void sti_hdmi_connector_destroy(struct drm_connector *connector)
-{
-	struct sti_hdmi_connector *hdmi_connector
-		= to_sti_hdmi_connector(connector);
-
-	drm_connector_unregister(connector);
-	drm_connector_cleanup(connector);
-	kfree(hdmi_connector);
-}
-
 static void sti_hdmi_connector_init_property(struct drm_device *drm_dev,
 					     struct drm_connector *connector)
 {
@@ -1024,17 +1039,31 @@ sti_hdmi_connector_get_property(struct drm_connector *connector,
 	return -EINVAL;
 }
 
+static int sti_hdmi_late_register(struct drm_connector *connector)
+{
+	struct sti_hdmi_connector *hdmi_connector
+		= to_sti_hdmi_connector(connector);
+	struct sti_hdmi *hdmi = hdmi_connector->hdmi;
+
+	if (hdmi_debugfs_init(hdmi, hdmi->drm_dev->primary)) {
+		DRM_ERROR("HDMI debugfs setup failed\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static const struct drm_connector_funcs sti_hdmi_connector_funcs = {
-	.dpms = drm_atomic_helper_connector_dpms,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.detect = sti_hdmi_connector_detect,
-	.destroy = sti_hdmi_connector_destroy,
+	.destroy = drm_connector_cleanup,
 	.reset = drm_atomic_helper_connector_reset,
 	.set_property = drm_atomic_helper_connector_set_property,
 	.atomic_set_property = sti_hdmi_connector_set_property,
 	.atomic_get_property = sti_hdmi_connector_get_property,
 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+	.late_register = sti_hdmi_late_register,
 };
 
 static struct drm_encoder *sti_hdmi_find_encoder(struct drm_device *dev)
@@ -1049,6 +1078,207 @@ static struct drm_encoder *sti_hdmi_find_encoder(struct drm_device *dev)
1049 return NULL; 1078 return NULL;
1050} 1079}
1051 1080
1081/**
1082 * sti_hdmi_audio_get_non_coherent_n() - get N parameter for non-coherent
1083 * clocks. Non-coherent means that the audio and TMDS clocks do not share the
1084 * same source, so they may drift relative to each other. In that case the
1085 * assumption is that CTS is calculated automatically by the hardware.
1086 *
1087 * @audio_fs: audio frame clock frequency in Hz
1088 *
1089 * The values computed are based on the table in the HDMI 1.4b specification.
1090 *
1091 * Return: the N value.
1092 */
1093static int sti_hdmi_audio_get_non_coherent_n(unsigned int audio_fs)
1094{
1095 unsigned int n;
1096
1097 switch (audio_fs) {
1098 case 32000:
1099 n = 4096;
1100 break;
1101 case 44100:
1102 n = 6272;
1103 break;
1104 case 48000:
1105 n = 6144;
1106 break;
1107 case 88200:
1108 n = 6272 * 2;
1109 break;
1110 case 96000:
1111 n = 6144 * 2;
1112 break;
1113 case 176400:
1114 n = 6272 * 4;
1115 break;
1116 case 192000:
1117 n = 6144 * 4;
1118 break;
1119 default:
1120 /* Not pre-defined, recommended value: 128 * fs / 1000 */
1121 n = (audio_fs * 128) / 1000;
1122 }
1123
1124 return n;
1125}
1126
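
The table above encodes the audio clock regeneration relation from the HDMI specification, 128 * fs = f_TMDS * N / CTS: the source transmits N together with CTS (here measured by hardware, since the clocks are non-coherent) so the sink can reconstruct the audio clock. A sanity-check sketch, not driver code, assuming div_u64() from linux/math64.h:

	/* Expected CTS for a given TMDS clock, N and sample rate:
	 * CTS = f_TMDS * N / (128 * fs).  E.g. fs = 48 kHz, N = 6144 and
	 * f_TMDS = 148.5 MHz gives CTS = 148500.
	 */
	static u32 hdmi_expected_cts(u32 tmds_hz, u32 n, u32 fs)
	{
		return div_u64((u64)tmds_hz * n, 128 * fs);
	}
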
1127static int hdmi_audio_configure(struct sti_hdmi *hdmi,
1128 struct hdmi_audio_params *params)
1129{
1130 int audio_cfg, n;
1131 struct hdmi_audio_infoframe *info = &params->cea;
1132
1133 DRM_DEBUG_DRIVER("\n");
1134
1135 if (!hdmi->enabled)
1136 return 0;
1137
1138 /* update N parameter */
1139 n = sti_hdmi_audio_get_non_coherent_n(params->sample_rate);
1140
1141 DRM_DEBUG_DRIVER("Audio rate = %d Hz, TMDS clock = %d Hz, n = %d\n",
1142 params->sample_rate, hdmi->mode.clock * 1000, n);
1143 hdmi_write(hdmi, n, HDMI_AUDN);
1144
1145 /* update HDMI registers according to configuration */
1146 audio_cfg = HDMI_AUD_CFG_SPDIF_DIV_2 | HDMI_AUD_CFG_DTS_INVALID |
1147 HDMI_AUD_CFG_ONE_BIT_INVALID;
1148
1149 switch (info->channels) {
1150 case 8:
1151 audio_cfg |= HDMI_AUD_CFG_CH78_VALID;
1152 case 6:
1153 audio_cfg |= HDMI_AUD_CFG_CH56_VALID;
1154 case 4:
1155 audio_cfg |= HDMI_AUD_CFG_CH34_VALID | HDMI_AUD_CFG_8CH;
1156 case 2:
1157 audio_cfg |= HDMI_AUD_CFG_CH12_VALID;
1158 break;
1159 default:
1160 DRM_ERROR("ERROR: Unsupported number of channels (%d)!\n",
1161 info->channels);
1162 return -EINVAL;
1163 }
1164
1165 hdmi_write(hdmi, audio_cfg, HDMI_AUDIO_CFG);
1166
1167 hdmi->audio = *params;
1168
1169 return hdmi_audio_infoframe_config(hdmi);
1170}
1171
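
Note that the channels switch in hdmi_audio_configure() above falls through deliberately: each case adds its channel-pair valid bit on top of the bits for all lower pairs, and anything above stereo also selects the 8-channel layout. A more explicit sketch of the same bit accumulation (equivalent logic, not the driver's code):

	static const u32 pair_valid[] = {
		HDMI_AUD_CFG_CH12_VALID, HDMI_AUD_CFG_CH34_VALID,
		HDMI_AUD_CFG_CH56_VALID, HDMI_AUD_CFG_CH78_VALID,
	};
	unsigned int i, pairs = info->channels / 2;	/* 2, 4, 6 or 8 channels */

	for (i = 0; i < pairs; i++)
		audio_cfg |= pair_valid[i];
	if (info->channels > 2)
		audio_cfg |= HDMI_AUD_CFG_8CH;	/* >2 channels use the 8ch layout */
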
1172static void hdmi_audio_shutdown(struct device *dev, void *data)
1173{
1174 struct sti_hdmi *hdmi = dev_get_drvdata(dev);
1175 int audio_cfg;
1176
1177 DRM_DEBUG_DRIVER("\n");
1178
1179 /* disable audio */
1180 audio_cfg = HDMI_AUD_CFG_SPDIF_DIV_2 | HDMI_AUD_CFG_DTS_INVALID |
1181 HDMI_AUD_CFG_ONE_BIT_INVALID;
1182 hdmi_write(hdmi, audio_cfg, HDMI_AUDIO_CFG);
1183
1184 hdmi->audio.enabled = 0;
1185 hdmi_audio_infoframe_config(hdmi);
1186}
1187
1188static int hdmi_audio_hw_params(struct device *dev,
1189 void *data,
1190 struct hdmi_codec_daifmt *daifmt,
1191 struct hdmi_codec_params *params)
1192{
1193 struct sti_hdmi *hdmi = dev_get_drvdata(dev);
1194 int ret;
1195 struct hdmi_audio_params audio = {
1196 .sample_width = params->sample_width,
1197 .sample_rate = params->sample_rate,
1198 .cea = params->cea,
1199 };
1200
1201 DRM_DEBUG_DRIVER("\n");
1202
1203 if (!hdmi->enabled)
1204 return 0;
1205
1206 if ((daifmt->fmt != HDMI_I2S) || daifmt->bit_clk_inv ||
1207 daifmt->frame_clk_inv || daifmt->bit_clk_master ||
1208 daifmt->frame_clk_master) {
1209 dev_err(dev, "%s: Bad flags %d %d %d %d\n", __func__,
1210 daifmt->bit_clk_inv, daifmt->frame_clk_inv,
1211 daifmt->bit_clk_master,
1212 daifmt->frame_clk_master);
1213 return -EINVAL;
1214 }
1215
1216 audio.enabled = 1;
1217
1218 ret = hdmi_audio_configure(hdmi, &audio);
1219 if (ret < 0)
1220 return ret;
1221
1222 return 0;
1223}
1224
1225static int hdmi_audio_digital_mute(struct device *dev, void *data, bool enable)
1226{
1227 struct sti_hdmi *hdmi = dev_get_drvdata(dev);
1228
1229 DRM_DEBUG_DRIVER("%s\n", enable ? "enable" : "disable");
1230
1231 if (enable)
1232 hdmi_write(hdmi, HDMI_SAMPLE_FLAT_ALL, HDMI_SAMPLE_FLAT_MASK);
1233 else
1234 hdmi_write(hdmi, HDMI_SAMPLE_FLAT_NO, HDMI_SAMPLE_FLAT_MASK);
1235
1236 return 0;
1237}
1238
1239static int hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf, size_t len)
1240{
1241 struct sti_hdmi *hdmi = dev_get_drvdata(dev);
1242 struct drm_connector *connector = hdmi->drm_connector;
1243
1244 DRM_DEBUG_DRIVER("\n");
1245 memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
1246
1247 return 0;
1248}
1249
1250static const struct hdmi_codec_ops audio_codec_ops = {
1251 .hw_params = hdmi_audio_hw_params,
1252 .audio_shutdown = hdmi_audio_shutdown,
1253 .digital_mute = hdmi_audio_digital_mute,
1254 .get_eld = hdmi_audio_get_eld,
1255};
1256
1257static int sti_hdmi_register_audio_driver(struct device *dev,
1258 struct sti_hdmi *hdmi)
1259{
1260 struct hdmi_codec_pdata codec_data = {
1261 .ops = &audio_codec_ops,
1262 .max_i2s_channels = 8,
1263 .i2s = 1,
1264 };
1265
1266 DRM_DEBUG_DRIVER("\n");
1267
1268 hdmi->audio.enabled = 0;
1269
1270 hdmi->audio_pdev = platform_device_register_data(
1271 dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
1272 &codec_data, sizeof(codec_data));
1273
1274 if (IS_ERR(hdmi->audio_pdev))
1275 return PTR_ERR(hdmi->audio_pdev);
1276
1277 DRM_INFO("%s Driver bound %s\n", HDMI_CODEC_DRV_NAME, dev_name(dev));
1278
1279 return 0;
1280}
1281
 static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
 {
 	struct sti_hdmi *hdmi = dev_get_drvdata(dev);
@@ -1095,9 +1325,7 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
 	/* initialise property */
 	sti_hdmi_connector_init_property(drm_dev, drm_connector);
 
-	err = drm_connector_register(drm_connector);
-	if (err)
-		goto err_connector;
+	hdmi->drm_connector = drm_connector;
 
 	err = drm_mode_connector_attach_encoder(drm_connector, encoder);
 	if (err) {
@@ -1105,19 +1333,27 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
 		goto err_sysfs;
 	}
 
+	err = sti_hdmi_register_audio_driver(dev, hdmi);
+	if (err) {
+		DRM_ERROR("Failed to attach an audio codec\n");
+		goto err_sysfs;
+	}
+
+	/* Initialize audio infoframe */
+	err = hdmi_audio_infoframe_init(&hdmi->audio.cea);
+	if (err) {
+		DRM_ERROR("Failed to init audio infoframe\n");
+		goto err_sysfs;
+	}
+
 	/* Enable default interrupts */
 	hdmi_write(hdmi, HDMI_DEFAULT_INT, HDMI_INT_EN);
 
-	if (hdmi_debugfs_init(hdmi, drm_dev->primary))
-		DRM_ERROR("HDMI debugfs setup failed\n");
-
 	return 0;
 
 err_sysfs:
-	drm_connector_unregister(drm_connector);
-err_connector:
-	drm_connector_cleanup(drm_connector);
-
+	drm_bridge_remove(bridge);
+	hdmi->drm_connector = NULL;
 	return -EINVAL;
 }
 
@@ -1267,6 +1503,8 @@ static int sti_hdmi_remove(struct platform_device *pdev)
 	struct sti_hdmi *hdmi = dev_get_drvdata(&pdev->dev);
 
 	i2c_put_adapter(hdmi->ddc_adapt);
+	if (hdmi->audio_pdev)
+		platform_device_unregister(hdmi->audio_pdev);
 	component_del(&pdev->dev, &sti_hdmi_ops);
 
 	return 0;
diff --git a/drivers/gpu/drm/sti/sti_hdmi.h b/drivers/gpu/drm/sti/sti_hdmi.h
index ef3a94583bbd..119bc3582ac7 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.h
+++ b/drivers/gpu/drm/sti/sti_hdmi.h
@@ -23,6 +23,13 @@ struct hdmi_phy_ops {
23 void (*stop)(struct sti_hdmi *hdmi); 23 void (*stop)(struct sti_hdmi *hdmi);
24}; 24};
25 25
26struct hdmi_audio_params {
27 bool enabled;
28 unsigned int sample_width;
29 unsigned int sample_rate;
30 struct hdmi_audio_infoframe cea;
31};
32
26/* values for the framing mode property */ 33/* values for the framing mode property */
27enum sti_hdmi_modes { 34enum sti_hdmi_modes {
28 HDMI_MODE_HDMI, 35 HDMI_MODE_HDMI,
@@ -67,6 +74,9 @@ static const struct drm_prop_enum_list colorspace_mode_names[] = {
67 * @ddc_adapt: i2c ddc adapter 74 * @ddc_adapt: i2c ddc adapter
68 * @colorspace: current colorspace selected 75 * @colorspace: current colorspace selected
69 * @hdmi_mode: select framing for HDMI or DVI 76 * @hdmi_mode: select framing for HDMI or DVI
77 * @audio_pdev: ASoC hdmi-codec platform device
78 * @audio: hdmi audio parameters.
79 * @drm_connector: hdmi connector
70 */ 80 */
71struct sti_hdmi { 81struct sti_hdmi {
72 struct device dev; 82 struct device dev;
@@ -89,6 +99,9 @@ struct sti_hdmi {
89 struct i2c_adapter *ddc_adapt; 99 struct i2c_adapter *ddc_adapt;
90 enum hdmi_colorspace colorspace; 100 enum hdmi_colorspace colorspace;
91 enum sti_hdmi_modes hdmi_mode; 101 enum sti_hdmi_modes hdmi_mode;
102 struct platform_device *audio_pdev;
103 struct hdmi_audio_params audio;
104 struct drm_connector *drm_connector;
92}; 105};
93 106
94u32 hdmi_read(struct sti_hdmi *hdmi, int offset); 107u32 hdmi_read(struct sti_hdmi *hdmi, int offset);
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 1edec29b9e45..33d2f42550cc 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -555,14 +555,8 @@ static int hqvdp_dbg_show(struct seq_file *s, void *data)
 {
 	struct drm_info_node *node = s->private;
 	struct sti_hqvdp *hqvdp = (struct sti_hqvdp *)node->info_ent->data;
-	struct drm_device *dev = node->minor->dev;
 	int cmd, cmd_offset, infoxp70;
 	void *virt;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
 
 	seq_printf(s, "%s: (vaddr = 0x%p)",
 		   sti_plane_to_str(&hqvdp->plane), hqvdp->regs);
@@ -630,7 +624,6 @@ static int hqvdp_dbg_show(struct seq_file *s, void *data)
 
 	seq_puts(s, "\n");
 
-	mutex_unlock(&dev->struct_mutex);
 	return 0;
 }
 
@@ -1241,6 +1234,33 @@ static const struct drm_plane_helper_funcs sti_hqvdp_helpers_funcs = {
 	.atomic_disable = sti_hqvdp_atomic_disable,
 };
 
+static void sti_hqvdp_destroy(struct drm_plane *drm_plane)
+{
+	DRM_DEBUG_DRIVER("\n");
+
+	drm_plane_helper_disable(drm_plane);
+	drm_plane_cleanup(drm_plane);
+}
+
+static int sti_hqvdp_late_register(struct drm_plane *drm_plane)
+{
+	struct sti_plane *plane = to_sti_plane(drm_plane);
+	struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
+
+	return hqvdp_debugfs_init(hqvdp, drm_plane->dev->primary);
+}
+
+struct drm_plane_funcs sti_hqvdp_plane_helpers_funcs = {
+	.update_plane = drm_atomic_helper_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	.destroy = sti_hqvdp_destroy,
+	.set_property = sti_plane_set_property,
+	.reset = drm_atomic_helper_plane_reset,
+	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+	.late_register = sti_hqvdp_late_register,
+};
+
 static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev,
 					  struct device *dev, int desc)
 {
@@ -1253,7 +1273,7 @@ static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev,
 	sti_hqvdp_init(hqvdp);
 
 	res = drm_universal_plane_init(drm_dev, &hqvdp->plane.drm_plane, 1,
-				       &sti_plane_helpers_funcs,
+				       &sti_hqvdp_plane_helpers_funcs,
 				       hqvdp_supported_formats,
 				       ARRAY_SIZE(hqvdp_supported_formats),
 				       DRM_PLANE_TYPE_OVERLAY, NULL);
@@ -1266,9 +1286,6 @@ static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev,
 
 	sti_plane_init_property(&hqvdp->plane, DRM_PLANE_TYPE_OVERLAY);
 
-	if (hqvdp_debugfs_init(hqvdp, drm_dev->primary))
-		DRM_ERROR("HQVDP debugfs setup failed\n");
-
 	return &hqvdp->plane.drm_plane;
 }
 
diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c
index aed7801b51f7..1885c7ab5a8b 100644
--- a/drivers/gpu/drm/sti/sti_mixer.c
+++ b/drivers/gpu/drm/sti/sti_mixer.c
@@ -151,12 +151,6 @@ static int mixer_dbg_show(struct seq_file *s, void *arg)
 {
 	struct drm_info_node *node = s->private;
 	struct sti_mixer *mixer = (struct sti_mixer *)node->info_ent->data;
-	struct drm_device *dev = node->minor->dev;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
 
 	seq_printf(s, "%s: (vaddr = 0x%p)",
 		   sti_mixer_to_str(mixer), mixer->regs);
@@ -176,7 +170,6 @@ static int mixer_dbg_show(struct seq_file *s, void *arg)
 	mixer_dbg_mxn(s, mixer->regs + GAM_MIXER_MX0);
 	seq_puts(s, "\n");
 
-	mutex_unlock(&dev->struct_mutex);
 	return 0;
 }
 
@@ -188,7 +181,7 @@ static struct drm_info_list mixer1_debugfs_files[] = {
 	{ "mixer_aux", mixer_dbg_show, 0, NULL },
 };
 
-static int mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor)
+int sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor)
 {
 	unsigned int i;
 	struct drm_info_list *mixer_debugfs_files;
@@ -400,8 +393,5 @@ struct sti_mixer *sti_mixer_create(struct device *dev,
 	DRM_DEBUG_DRIVER("%s created. Regs=%p\n",
 			 sti_mixer_to_str(mixer), mixer->regs);
 
-	if (mixer_debugfs_init(mixer, drm_dev->primary))
-		DRM_ERROR("MIXER debugfs setup failed\n");
-
 	return mixer;
 }
diff --git a/drivers/gpu/drm/sti/sti_mixer.h b/drivers/gpu/drm/sti/sti_mixer.h
index 6f35fc086873..830a3c42d886 100644
--- a/drivers/gpu/drm/sti/sti_mixer.h
+++ b/drivers/gpu/drm/sti/sti_mixer.h
@@ -55,6 +55,8 @@ int sti_mixer_active_video_area(struct sti_mixer *mixer,
 
 void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable);
 
+int sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor);
+
 /* depth in Cross-bar control = z order */
 #define GAM_MIXER_NB_DEPTH_LEVEL 6
 
diff --git a/drivers/gpu/drm/sti/sti_plane.c b/drivers/gpu/drm/sti/sti_plane.c
index f10c98d3f012..0cf3335ef37c 100644
--- a/drivers/gpu/drm/sti/sti_plane.c
+++ b/drivers/gpu/drm/sti/sti_plane.c
@@ -45,25 +45,15 @@ const char *sti_plane_to_str(struct sti_plane *plane)
 
 #define STI_FPS_INTERVAL_MS     3000
 
-static int sti_plane_timespec_ms_diff(struct timespec lhs, struct timespec rhs)
-{
-	struct timespec tmp_ts = timespec_sub(lhs, rhs);
-	u64 tmp_ns = (u64)timespec_to_ns(&tmp_ts);
-
-	do_div(tmp_ns, NSEC_PER_MSEC);
-
-	return (u32)tmp_ns;
-}
-
 void sti_plane_update_fps(struct sti_plane *plane,
 			  bool new_frame,
 			  bool new_field)
 {
-	struct timespec now;
+	ktime_t now;
 	struct sti_fps_info *fps;
 	int fpks, fipks, ms_since_last, num_frames, num_fields;
 
-	getrawmonotonic(&now);
+	now = ktime_get();
 
 	/* Compute number of frame updates */
 	fps = &plane->fps_info;
@@ -76,7 +66,7 @@ void sti_plane_update_fps(struct sti_plane *plane,
 		return;
 
 	fps->curr_frame_counter++;
-	ms_since_last = sti_plane_timespec_ms_diff(now, fps->last_timestamp);
+	ms_since_last = ktime_to_ms(ktime_sub(now, fps->last_timestamp));
 	num_frames = fps->curr_frame_counter - fps->last_frame_counter;
 
 	if (num_frames <= 0 || ms_since_last < STI_FPS_INTERVAL_MS)
@@ -106,17 +96,9 @@ void sti_plane_update_fps(struct sti_plane *plane,
 		   plane->fps_info.fips_str);
 }
 
-static void sti_plane_destroy(struct drm_plane *drm_plane)
-{
-	DRM_DEBUG_DRIVER("\n");
-
-	drm_plane_helper_disable(drm_plane);
-	drm_plane_cleanup(drm_plane);
-}
-
-static int sti_plane_set_property(struct drm_plane *drm_plane,
-				  struct drm_property *property,
-				  uint64_t val)
+int sti_plane_set_property(struct drm_plane *drm_plane,
+			   struct drm_property *property,
+			   uint64_t val)
 {
 	struct drm_device *dev = drm_plane->dev;
 	struct sti_private *private = dev->dev_private;
@@ -170,13 +152,3 @@ void sti_plane_init_property(struct sti_plane *plane,
 			 plane->drm_plane.base.id,
 			 sti_plane_to_str(plane), plane->zorder);
 }
-
-struct drm_plane_funcs sti_plane_helpers_funcs = {
-	.update_plane = drm_atomic_helper_update_plane,
-	.disable_plane = drm_atomic_helper_disable_plane,
-	.destroy = sti_plane_destroy,
-	.set_property = sti_plane_set_property,
-	.reset = drm_atomic_helper_plane_reset,
-	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
-	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
-};
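
The fps bookkeeping above switches from struct timespec plus a hand-rolled do_div() helper to ktime_t, the kernel's 64-bit nanosecond time type, so the elapsed-time computation collapses to one expression; it also moves from the raw monotonic clock to the regular monotonic one, which is fine for coarse statistics. The idiom in isolation (a sketch, not driver code):

	#include <linux/ktime.h>

	/* Milliseconds elapsed since 'since', on the monotonic clock */
	static inline s64 elapsed_ms(ktime_t since)
	{
		return ktime_to_ms(ktime_sub(ktime_get(), since));
	}
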
diff --git a/drivers/gpu/drm/sti/sti_plane.h b/drivers/gpu/drm/sti/sti_plane.h
index c50a3b9f5d37..e0ea1dd3bb88 100644
--- a/drivers/gpu/drm/sti/sti_plane.h
+++ b/drivers/gpu/drm/sti/sti_plane.h
@@ -11,8 +11,6 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_plane_helper.h>
 
-extern struct drm_plane_funcs sti_plane_helpers_funcs;
-
 #define to_sti_plane(x) container_of(x, struct sti_plane, drm_plane)
 
 #define STI_PLANE_TYPE_SHIFT 8
@@ -57,7 +55,7 @@ struct sti_fps_info {
 	unsigned int last_frame_counter;
 	unsigned int curr_field_counter;
 	unsigned int last_field_counter;
-	struct timespec last_timestamp;
+	ktime_t last_timestamp;
 	char fps_str[FPS_LENGTH];
 	char fips_str[FPS_LENGTH];
 };
@@ -83,6 +81,11 @@ const char *sti_plane_to_str(struct sti_plane *plane);
 void sti_plane_update_fps(struct sti_plane *plane,
 			  bool new_frame,
 			  bool new_field);
+
+int sti_plane_set_property(struct drm_plane *drm_plane,
+			   struct drm_property *property,
+			   uint64_t val);
+
 void sti_plane_init_property(struct sti_plane *plane,
 			     enum drm_plane_type type);
 #endif
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index f983db5a59da..e25995b35715 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -112,6 +112,7 @@ struct sti_tvout {
 	struct drm_encoder *hdmi;
 	struct drm_encoder *hda;
 	struct drm_encoder *dvo;
+	bool debugfs_registered;
 };
 
 struct sti_tvout_encoder {
@@ -515,13 +516,7 @@ static int tvout_dbg_show(struct seq_file *s, void *data)
 {
 	struct drm_info_node *node = s->private;
 	struct sti_tvout *tvout = (struct sti_tvout *)node->info_ent->data;
-	struct drm_device *dev = node->minor->dev;
 	struct drm_crtc *crtc;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
 
 	seq_printf(s, "TVOUT: (vaddr = 0x%p)", tvout->regs);
 
@@ -587,7 +582,6 @@ static int tvout_dbg_show(struct seq_file *s, void *data)
 	DBGFS_DUMP(TVO_AUX_IN_VID_FORMAT);
 	seq_puts(s, "\n");
 
-	mutex_unlock(&dev->struct_mutex);
 	return 0;
 }
 
@@ -632,8 +626,37 @@ static void sti_tvout_encoder_destroy(struct drm_encoder *encoder)
 	kfree(sti_encoder);
 }
 
+static int sti_tvout_late_register(struct drm_encoder *encoder)
+{
+	struct sti_tvout *tvout = to_sti_tvout(encoder);
+	int ret;
+
+	if (tvout->debugfs_registered)
+		return 0;
+
+	ret = tvout_debugfs_init(tvout, encoder->dev->primary);
+	if (ret)
+		return ret;
+
+	tvout->debugfs_registered = true;
+	return 0;
+}
+
+static void sti_tvout_early_unregister(struct drm_encoder *encoder)
+{
+	struct sti_tvout *tvout = to_sti_tvout(encoder);
+
+	if (!tvout->debugfs_registered)
+		return;
+
+	tvout_debugfs_exit(tvout, encoder->dev->primary);
+	tvout->debugfs_registered = false;
+}
+
 static const struct drm_encoder_funcs sti_tvout_encoder_funcs = {
 	.destroy = sti_tvout_encoder_destroy,
+	.late_register = sti_tvout_late_register,
+	.early_unregister = sti_tvout_early_unregister,
 };
 
 static void sti_dvo_encoder_enable(struct drm_encoder *encoder)
@@ -820,9 +843,6 @@ static int sti_tvout_bind(struct device *dev, struct device *master, void *data)
 
 	sti_tvout_create_encoders(drm_dev, tvout);
 
-	if (tvout_debugfs_init(tvout, drm_dev->primary))
-		DRM_ERROR("TVOUT debugfs setup failed\n");
-
 	return 0;
 }
 
@@ -830,11 +850,8 @@ static void sti_tvout_unbind(struct device *dev, struct device *master,
 			     void *data)
 {
 	struct sti_tvout *tvout = dev_get_drvdata(dev);
-	struct drm_device *drm_dev = data;
 
 	sti_tvout_destroy_encoders(tvout);
-
-	tvout_debugfs_exit(tvout, drm_dev->primary);
 }
 
 static const struct component_ops sti_tvout_ops = {
diff --git a/drivers/gpu/drm/sti/sti_vid.c b/drivers/gpu/drm/sti/sti_vid.c
index 523ed19f5ac6..47634a0251fc 100644
--- a/drivers/gpu/drm/sti/sti_vid.c
+++ b/drivers/gpu/drm/sti/sti_vid.c
@@ -92,12 +92,6 @@ static int vid_dbg_show(struct seq_file *s, void *arg)
 {
 	struct drm_info_node *node = s->private;
 	struct sti_vid *vid = (struct sti_vid *)node->info_ent->data;
-	struct drm_device *dev = node->minor->dev;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
 
 	seq_printf(s, "VID: (vaddr= 0x%p)", vid->regs);
 
@@ -122,7 +116,6 @@ static int vid_dbg_show(struct seq_file *s, void *arg)
 	DBGFS_DUMP(VID_CSAT);
 	seq_puts(s, "\n");
 
-	mutex_unlock(&dev->struct_mutex);
 	return 0;
 }
 
@@ -130,7 +123,7 @@ static struct drm_info_list vid_debugfs_files[] = {
 	{ "vid", vid_dbg_show, 0, NULL },
 };
 
-static int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor)
+int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor)
 {
 	unsigned int i;
 
@@ -227,8 +220,5 @@ struct sti_vid *sti_vid_create(struct device *dev, struct drm_device *drm_dev,
 
 	sti_vid_init(vid);
 
-	if (vid_debugfs_init(vid, drm_dev->primary))
-		DRM_ERROR("VID debugfs setup failed\n");
-
 	return vid;
 }
diff --git a/drivers/gpu/drm/sti/sti_vid.h b/drivers/gpu/drm/sti/sti_vid.h
index 6c842344f3d8..fdc90f922a05 100644
--- a/drivers/gpu/drm/sti/sti_vid.h
+++ b/drivers/gpu/drm/sti/sti_vid.h
@@ -26,4 +26,6 @@ void sti_vid_disable(struct sti_vid *vid);
 struct sti_vid *sti_vid_create(struct device *dev, struct drm_device *drm_dev,
 			       int id, void __iomem *baseaddr);
 
+int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor);
+
 #endif
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index 6bf4ce466d20..957ce712ea44 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -65,7 +65,7 @@
 #define HDMI_DELAY (5)
 
 /* Delay introduced by the DVO in nb of pixel */
-#define DVO_DELAY (2)
+#define DVO_DELAY (7)
 
 /* delay introduced by the Arbitrary Waveform Generator in nb of pixels */
 #define AWG_DELAY_HD (-9)
diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig
index 99510e64e91a..a4b357db8856 100644
--- a/drivers/gpu/drm/sun4i/Kconfig
+++ b/drivers/gpu/drm/sun4i/Kconfig
@@ -1,6 +1,6 @@
 config DRM_SUN4I
 	tristate "DRM Support for Allwinner A10 Display Engine"
-	depends on DRM && ARM
+	depends on DRM && ARM && COMMON_CLK
 	depends on ARCH_SUNXI || COMPILE_TEST
 	select DRM_GEM_CMA_HELPER
 	select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index f7a15c1a93bf..3ab560450a82 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -190,7 +190,7 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
190 /* Get the physical address of the buffer in memory */ 190 /* Get the physical address of the buffer in memory */
191 gem = drm_fb_cma_get_gem_obj(fb, 0); 191 gem = drm_fb_cma_get_gem_obj(fb, 0);
192 192
193 DRM_DEBUG_DRIVER("Using GEM @ 0x%x\n", gem->paddr); 193 DRM_DEBUG_DRIVER("Using GEM @ %pad\n", &gem->paddr);
194 194
195 /* Compute the start of the displayed memory */ 195 /* Compute the start of the displayed memory */
196 bpp = drm_format_plane_cpp(fb->pixel_format, 0); 196 bpp = drm_format_plane_cpp(fb->pixel_format, 0);
@@ -198,7 +198,7 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
198 paddr += (state->src_x >> 16) * bpp; 198 paddr += (state->src_x >> 16) * bpp;
199 paddr += (state->src_y >> 16) * fb->pitches[0]; 199 paddr += (state->src_y >> 16) * fb->pitches[0];
200 200
201 DRM_DEBUG_DRIVER("Setting buffer address to 0x%x\n", paddr); 201 DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr);
202 202
203 /* Write the 32 lower bits of the address (in bits) */ 203 /* Write the 32 lower bits of the address (in bits) */
204 lo_paddr = paddr << 3; 204 lo_paddr = paddr << 3;
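The two debug strings above switch from 0x%x to %pad because gem->paddr and paddr are dma_addr_t, which may be wider than unsigned int (for instance with LPAE). %pad is the printk extension for dma_addr_t and takes a pointer to the value rather than the value itself; a minimal sketch:

#include <linux/printk.h>
#include <linux/types.h>

static void show_buffer_addr(dma_addr_t paddr)
{
        /* dma_addr_t can be 32 or 64 bit; %pad prints it correctly
         * either way and expects the address of the variable. */
        pr_debug("Using buffer @ %pad\n", &paddr);
}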
diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c
index 4182a21f5923..f628b6d8f23f 100644
--- a/drivers/gpu/drm/sun4i/sun4i_crtc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c
@@ -51,10 +51,22 @@ static void sun4i_crtc_atomic_flush(struct drm_crtc *crtc,
51{ 51{
52 struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc); 52 struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
53 struct sun4i_drv *drv = scrtc->drv; 53 struct sun4i_drv *drv = scrtc->drv;
54 struct drm_pending_vblank_event *event = crtc->state->event;
54 55
55 DRM_DEBUG_DRIVER("Committing plane changes\n"); 56 DRM_DEBUG_DRIVER("Committing plane changes\n");
56 57
57 sun4i_backend_commit(drv->backend); 58 sun4i_backend_commit(drv->backend);
59
60 if (event) {
61 crtc->state->event = NULL;
62
63 spin_lock_irq(&crtc->dev->event_lock);
64 if (drm_crtc_vblank_get(crtc) == 0)
65 drm_crtc_arm_vblank_event(crtc, event);
66 else
67 drm_crtc_send_vblank_event(crtc, event);
68 spin_unlock_irq(&crtc->dev->event_lock);
69 }
58} 70}
59 71
60static void sun4i_crtc_disable(struct drm_crtc *crtc) 72static void sun4i_crtc_disable(struct drm_crtc *crtc)
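The added block is the standard way for an atomic driver to complete the userspace page-flip event from .atomic_flush: consume crtc->state->event under event_lock, and prefer arming it on the next vblank when a vblank reference can be taken, falling back to immediate delivery otherwise. A generic sketch of the same pattern:

static void complete_flip_event(struct drm_crtc *crtc)
{
        struct drm_pending_vblank_event *event = crtc->state->event;

        if (!event)
                return;

        crtc->state->event = NULL;

        spin_lock_irq(&crtc->dev->event_lock);
        if (drm_crtc_vblank_get(crtc) == 0)
                /* vblank interrupts available: deliver on the next one */
                drm_crtc_arm_vblank_event(crtc, event);
        else
                /* no vblank reference: complete the event right away */
                drm_crtc_send_vblank_event(crtc, event);
        spin_unlock_irq(&crtc->dev->event_lock);
}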
diff --git a/drivers/gpu/drm/sun4i/sun4i_dotclock.c b/drivers/gpu/drm/sun4i/sun4i_dotclock.c
index 3ff668cb463c..5b3463197c48 100644
--- a/drivers/gpu/drm/sun4i/sun4i_dotclock.c
+++ b/drivers/gpu/drm/sun4i/sun4i_dotclock.c
@@ -72,14 +72,40 @@ static unsigned long sun4i_dclk_recalc_rate(struct clk_hw *hw,
72static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate, 72static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
73 unsigned long *parent_rate) 73 unsigned long *parent_rate)
74{ 74{
75 return *parent_rate / DIV_ROUND_CLOSEST(*parent_rate, rate); 75 unsigned long best_parent = 0;
76 u8 best_div = 1;
77 int i;
78
79 for (i = 6; i < 127; i++) {
80 unsigned long ideal = rate * i;
81 unsigned long rounded;
82
83 rounded = clk_hw_round_rate(clk_hw_get_parent(hw),
84 ideal);
85
86 if (rounded == ideal) {
87 best_parent = rounded;
88 best_div = i;
89 goto out;
90 }
91
92 if ((rounded < ideal) && (rounded > best_parent)) {
93 best_parent = rounded;
94 best_div = i;
95 }
96 }
97
98out:
99 *parent_rate = best_parent;
100
101 return best_parent / best_div;
76} 102}
77 103
78static int sun4i_dclk_set_rate(struct clk_hw *hw, unsigned long rate, 104static int sun4i_dclk_set_rate(struct clk_hw *hw, unsigned long rate,
79 unsigned long parent_rate) 105 unsigned long parent_rate)
80{ 106{
81 struct sun4i_dclk *dclk = hw_to_dclk(hw); 107 struct sun4i_dclk *dclk = hw_to_dclk(hw);
82 int div = DIV_ROUND_CLOSEST(parent_rate, rate); 108 u8 div = parent_rate / rate;
83 109
84 return regmap_update_bits(dclk->regmap, SUN4I_TCON0_DCLK_REG, 110 return regmap_update_bits(dclk->regmap, SUN4I_TCON0_DCLK_REG,
85 GENMASK(6, 0), div); 111 GENMASK(6, 0), div);
@@ -127,10 +153,14 @@ int sun4i_dclk_create(struct device *dev, struct sun4i_tcon *tcon)
127 const char *clk_name, *parent_name; 153 const char *clk_name, *parent_name;
128 struct clk_init_data init; 154 struct clk_init_data init;
129 struct sun4i_dclk *dclk; 155 struct sun4i_dclk *dclk;
156 int ret;
130 157
131 parent_name = __clk_get_name(tcon->sclk0); 158 parent_name = __clk_get_name(tcon->sclk0);
132 of_property_read_string_index(dev->of_node, "clock-output-names", 0, 159 ret = of_property_read_string_index(dev->of_node,
133 &clk_name); 160 "clock-output-names", 0,
161 &clk_name);
162 if (ret)
163 return ret;
134 164
135 dclk = devm_kzalloc(dev, sizeof(*dclk), GFP_KERNEL); 165 dclk = devm_kzalloc(dev, sizeof(*dclk), GFP_KERNEL);
136 if (!dclk) 166 if (!dclk)
@@ -140,6 +170,7 @@ int sun4i_dclk_create(struct device *dev, struct sun4i_tcon *tcon)
140 init.ops = &sun4i_dclk_ops; 170 init.ops = &sun4i_dclk_ops;
141 init.parent_names = &parent_name; 171 init.parent_names = &parent_name;
142 init.num_parents = 1; 172 init.num_parents = 1;
173 init.flags = CLK_SET_RATE_PARENT;
143 174
144 dclk->regmap = tcon->regs; 175 dclk->regmap = tcon->regs;
145 dclk->hw.init = &init; 176 dclk->hw.init = &init;
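The rewritten round_rate walks every divider the 7-bit register field (GENMASK(6, 0)) can usefully hold, asks the parent clock via clk_hw_round_rate() whether it can produce rate * div exactly, and otherwise remembers the closest undershoot; the new CLK_SET_RATE_PARENT flag then lets clk_set_rate() propagate the chosen parent rate upward. A toy user-space model of the search, assuming a parent that can only generate multiples of 3 MHz (a made-up granularity for illustration):

#include <stdio.h>

static unsigned long parent_round(unsigned long ideal)
{
        return ideal - (ideal % 3000000UL);  /* parent's 3 MHz steps */
}

int main(void)
{
        unsigned long rate = 33000000UL;     /* requested pixel clock */
        unsigned long best_parent = 0;
        unsigned int best_div = 1, i;

        for (i = 6; i < 127; i++) {
                unsigned long ideal = rate * i;
                unsigned long rounded = parent_round(ideal);

                if (rounded == ideal) {      /* exact match: stop */
                        best_parent = rounded;
                        best_div = i;
                        break;
                }
                if (rounded < ideal && rounded > best_parent) {
                        best_parent = rounded;   /* best undershoot */
                        best_div = i;
                }
        }

        printf("parent %lu Hz / %u = %lu Hz\n",
               best_parent, best_div, best_parent / best_div);
        return 0;
}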
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 76e922bb60e5..5b89940edcb1 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -24,34 +24,6 @@
24#include "sun4i_layer.h" 24#include "sun4i_layer.h"
25#include "sun4i_tcon.h" 25#include "sun4i_tcon.h"
26 26
27static int sun4i_drv_connector_plug_all(struct drm_device *drm)
28{
29 struct drm_connector *connector, *failed;
30 int ret;
31
32 mutex_lock(&drm->mode_config.mutex);
33 list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
34 ret = drm_connector_register(connector);
35 if (ret) {
36 failed = connector;
37 goto err;
38 }
39 }
40 mutex_unlock(&drm->mode_config.mutex);
41 return 0;
42
43err:
44 list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
45 if (failed == connector)
46 break;
47
48 drm_connector_unregister(connector);
49 }
50 mutex_unlock(&drm->mode_config.mutex);
51
52 return ret;
53}
54
55static int sun4i_drv_enable_vblank(struct drm_device *drm, unsigned int pipe) 27static int sun4i_drv_enable_vblank(struct drm_device *drm, unsigned int pipe)
56{ 28{
57 struct sun4i_drv *drv = drm->dev_private; 29 struct sun4i_drv *drv = drm->dev_private;
@@ -103,7 +75,7 @@ static struct drm_driver sun4i_drv_driver = {
103 .dumb_create = drm_gem_cma_dumb_create, 75 .dumb_create = drm_gem_cma_dumb_create,
104 .dumb_destroy = drm_gem_dumb_destroy, 76 .dumb_destroy = drm_gem_dumb_destroy,
105 .dumb_map_offset = drm_gem_cma_dumb_map_offset, 77 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
106 .gem_free_object = drm_gem_cma_free_object, 78 .gem_free_object_unlocked = drm_gem_cma_free_object,
107 .gem_vm_ops = &drm_gem_cma_vm_ops, 79 .gem_vm_ops = &drm_gem_cma_vm_ops,
108 80
109 /* PRIME Operations */ 81 /* PRIME Operations */
@@ -125,6 +97,22 @@ static struct drm_driver sun4i_drv_driver = {
125 .disable_vblank = sun4i_drv_disable_vblank, 97 .disable_vblank = sun4i_drv_disable_vblank,
126}; 98};
127 99
100static void sun4i_remove_framebuffers(void)
101{
102 struct apertures_struct *ap;
103
104 ap = alloc_apertures(1);
105 if (!ap)
106 return;
107
108 /* The framebuffer can be located anywhere in RAM */
109 ap->ranges[0].base = 0;
110 ap->ranges[0].size = ~0;
111
112 remove_conflicting_framebuffers(ap, "sun4i-drm-fb", false);
113 kfree(ap);
114}
115
128static int sun4i_drv_bind(struct device *dev) 116static int sun4i_drv_bind(struct device *dev)
129{ 117{
130 struct drm_device *drm; 118 struct drm_device *drm;
@@ -135,10 +123,6 @@ static int sun4i_drv_bind(struct device *dev)
135 if (!drm) 123 if (!drm)
136 return -ENOMEM; 124 return -ENOMEM;
137 125
138 ret = drm_dev_set_unique(drm, dev_name(drm->dev));
139 if (ret)
140 goto free_drm;
141
142 drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL); 126 drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
143 if (!drv) { 127 if (!drv) {
144 ret = -ENOMEM; 128 ret = -ENOMEM;
@@ -172,6 +156,9 @@ static int sun4i_drv_bind(struct device *dev)
172 } 156 }
173 drm->irq_enabled = true; 157 drm->irq_enabled = true;
174 158
159 /* Remove early framebuffers (ie. simplefb) */
160 sun4i_remove_framebuffers();
161
175 /* Create our framebuffer */ 162 /* Create our framebuffer */
176 drv->fbdev = sun4i_framebuffer_init(drm); 163 drv->fbdev = sun4i_framebuffer_init(drm);
177 if (IS_ERR(drv->fbdev)) { 164 if (IS_ERR(drv->fbdev)) {
@@ -187,14 +174,8 @@ static int sun4i_drv_bind(struct device *dev)
187 if (ret) 174 if (ret)
188 goto free_drm; 175 goto free_drm;
189 176
190 ret = sun4i_drv_connector_plug_all(drm);
191 if (ret)
192 goto unregister_drm;
193
194 return 0; 177 return 0;
195 178
196unregister_drm:
197 drm_dev_unregister(drm);
198free_drm: 179free_drm:
199 drm_dev_unref(drm); 180 drm_dev_unref(drm);
200 return ret; 181 return ret;
@@ -204,6 +185,7 @@ static void sun4i_drv_unbind(struct device *dev)
204{ 185{
205 struct drm_device *drm = dev_get_drvdata(dev); 186 struct drm_device *drm = dev_get_drvdata(dev);
206 187
188 drm_connector_unregister_all(drm);
207 drm_dev_unregister(drm); 189 drm_dev_unregister(drm);
208 drm_kms_helper_poll_fini(drm); 190 drm_kms_helper_poll_fini(drm);
209 sun4i_framebuffer_free(drm); 191 sun4i_framebuffer_free(drm);
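sun4i_remove_framebuffers() follows the usual recipe for evicting a firmware framebuffer (such as simplefb) before the real driver takes over scanout: claim the whole address range, since the early framebuffer can sit anywhere in RAM on these SoCs. The same pattern as a minimal sketch:

#include <linux/fb.h>
#include <linux/slab.h>

static void kick_out_firmware_fb(const char *drv_name)
{
        struct apertures_struct *ap = alloc_apertures(1);

        if (!ap)
                return;

        ap->ranges[0].base = 0;      /* cover all of RAM */
        ap->ranges[0].size = ~0;

        remove_conflicting_framebuffers(ap, drv_name, false);
        kfree(ap);
}

The other deletions in this file lean on newer core support: drm_dev_set_unique() is no longer required, and the hand-rolled connector registration loop goes away, presumably because the core now registers connectors during drm_dev_register(), with drm_connector_unregister_all() covering the unbind path.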
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index ab6494818050..f5bbac6efb4c 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -54,8 +54,13 @@ static int sun4i_rgb_get_modes(struct drm_connector *connector)
54static int sun4i_rgb_mode_valid(struct drm_connector *connector, 54static int sun4i_rgb_mode_valid(struct drm_connector *connector,
55 struct drm_display_mode *mode) 55 struct drm_display_mode *mode)
56{ 56{
57 struct sun4i_rgb *rgb = drm_connector_to_sun4i_rgb(connector);
58 struct sun4i_drv *drv = rgb->drv;
59 struct sun4i_tcon *tcon = drv->tcon;
57 u32 hsync = mode->hsync_end - mode->hsync_start; 60 u32 hsync = mode->hsync_end - mode->hsync_start;
58 u32 vsync = mode->vsync_end - mode->vsync_start; 61 u32 vsync = mode->vsync_end - mode->vsync_start;
62 unsigned long rate = mode->clock * 1000;
63 long rounded_rate;
59 64
60 DRM_DEBUG_DRIVER("Validating modes...\n"); 65 DRM_DEBUG_DRIVER("Validating modes...\n");
61 66
@@ -87,22 +92,21 @@ static int sun4i_rgb_mode_valid(struct drm_connector *connector,
87 92
88 DRM_DEBUG_DRIVER("Vertical parameters OK\n"); 93 DRM_DEBUG_DRIVER("Vertical parameters OK\n");
89 94
90 return MODE_OK; 95 rounded_rate = clk_round_rate(tcon->dclk, rate);
91} 96 if (rounded_rate < rate)
97 return MODE_CLOCK_LOW;
92 98
93static struct drm_encoder * 99 if (rounded_rate > rate)
94sun4i_rgb_best_encoder(struct drm_connector *connector) 100 return MODE_CLOCK_HIGH;
95{
96 struct sun4i_rgb *rgb =
97 drm_connector_to_sun4i_rgb(connector);
98 101
99 return &rgb->encoder; 102 DRM_DEBUG_DRIVER("Clock rate OK\n");
103
104 return MODE_OK;
100} 105}
101 106
102static struct drm_connector_helper_funcs sun4i_rgb_con_helper_funcs = { 107static struct drm_connector_helper_funcs sun4i_rgb_con_helper_funcs = {
103 .get_modes = sun4i_rgb_get_modes, 108 .get_modes = sun4i_rgb_get_modes,
104 .mode_valid = sun4i_rgb_mode_valid, 109 .mode_valid = sun4i_rgb_mode_valid,
105 .best_encoder = sun4i_rgb_best_encoder,
106}; 110};
107 111
108static enum drm_connector_status 112static enum drm_connector_status
@@ -203,7 +207,7 @@ int sun4i_rgb_init(struct drm_device *drm)
203 int ret; 207 int ret;
204 208
205 /* If we don't have a panel, there's no point in going on */ 209 /* If we don't have a panel, there's no point in going on */
206 if (!tcon->panel) 210 if (IS_ERR(tcon->panel))
207 return -ENODEV; 211 return -ENODEV;
208 212
209 rgb = devm_kzalloc(drm->dev, sizeof(*rgb), GFP_KERNEL); 213 rgb = devm_kzalloc(drm->dev, sizeof(*rgb), GFP_KERNEL);
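mode_valid now also rejects modes whose exact pixel clock the dotclock cannot generate, mapping an undershoot to MODE_CLOCK_LOW and an overshoot to MODE_CLOCK_HIGH, and the trivial best_encoder hook is dropped since the atomic helpers pick the single attached encoder automatically. A condensed sketch of the clock check, assuming a dclk handle like the one above:

static int mode_clock_valid(struct clk *dclk,
                            const struct drm_display_mode *mode)
{
        unsigned long rate = mode->clock * 1000; /* mode->clock is kHz */
        long rounded = clk_round_rate(dclk, rate);

        if (rounded < rate)
                return MODE_CLOCK_LOW;
        if (rounded > rate)
                return MODE_CLOCK_HIGH;

        return MODE_OK;
}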
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 9f19b0e08560..652385f09735 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -425,11 +425,11 @@ static struct drm_panel *sun4i_tcon_find_panel(struct device_node *node)
425 425
426 remote = of_graph_get_remote_port_parent(end_node); 426 remote = of_graph_get_remote_port_parent(end_node);
427 if (!remote) { 427 if (!remote) {
428 DRM_DEBUG_DRIVER("Enable to parse remote node\n"); 428 DRM_DEBUG_DRIVER("Unable to parse remote node\n");
429 return ERR_PTR(-EINVAL); 429 return ERR_PTR(-EINVAL);
430 } 430 }
431 431
432 return of_drm_find_panel(remote); 432 return of_drm_find_panel(remote) ?: ERR_PTR(-EPROBE_DEFER);
433} 433}
434 434
435static int sun4i_tcon_bind(struct device *dev, struct device *master, 435static int sun4i_tcon_bind(struct device *dev, struct device *master,
@@ -490,7 +490,11 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
490 return 0; 490 return 0;
491 } 491 }
492 492
493 return sun4i_rgb_init(drm); 493 ret = sun4i_rgb_init(drm);
494 if (ret < 0)
495 goto err_free_clocks;
496
497 return 0;
494 498
495err_free_clocks: 499err_free_clocks:
496 sun4i_tcon_free_clocks(tcon); 500 sun4i_tcon_free_clocks(tcon);
@@ -522,12 +526,13 @@ static int sun4i_tcon_probe(struct platform_device *pdev)
522 * Defer the probe. 526 * Defer the probe.
523 */ 527 */
524 panel = sun4i_tcon_find_panel(node); 528 panel = sun4i_tcon_find_panel(node);
525 if (IS_ERR(panel)) { 529
526 /* 530 /*
527 * If we don't have a panel endpoint, just go on 531 * If we don't have a panel endpoint, just go on
528 */ 532 */
529 if (PTR_ERR(panel) != -ENODEV) 533 if (PTR_ERR(panel) == -EPROBE_DEFER) {
530 return -EPROBE_DEFER; 534 DRM_DEBUG_DRIVER("Still waiting for our panel. Deferring...\n");
535 return -EPROBE_DEFER;
531 } 536 }
532 537
533 return component_add(&pdev->dev, &sun4i_tcon_ops); 538 return component_add(&pdev->dev, &sun4i_tcon_ops);
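sun4i_tcon_find_panel() now distinguishes "no panel endpoint" from "panel not probed yet": with a valid remote node, a missing panel becomes ERR_PTR(-EPROBE_DEFER) via the GNU ?: shorthand, and only that code makes the probe defer. A minimal sketch of the contract:

static struct drm_panel *find_panel(struct device_node *remote)
{
        /* GNU "elvis" operator: a ?: b is a ? a : b without
         * evaluating a twice */
        return of_drm_find_panel(remote) ?: ERR_PTR(-EPROBE_DEFER);
}

This pairs with the sun4i_rgb.c change above, where the check became IS_ERR(tcon->panel): the field now holds either a valid panel or an ERR_PTR value, never a bare NULL.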
diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c
index bc047f923508..b84147896294 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tv.c
@@ -526,18 +526,9 @@ static int sun4i_tv_comp_mode_valid(struct drm_connector *connector,
526 return MODE_OK; 526 return MODE_OK;
527} 527}
528 528
529static struct drm_encoder *
530sun4i_tv_comp_best_encoder(struct drm_connector *connector)
531{
532 struct sun4i_tv *tv = drm_connector_to_sun4i_tv(connector);
533
534 return &tv->encoder;
535}
536
537static struct drm_connector_helper_funcs sun4i_tv_comp_connector_helper_funcs = { 529static struct drm_connector_helper_funcs sun4i_tv_comp_connector_helper_funcs = {
538 .get_modes = sun4i_tv_comp_get_modes, 530 .get_modes = sun4i_tv_comp_get_modes,
539 .mode_valid = sun4i_tv_comp_mode_valid, 531 .mode_valid = sun4i_tv_comp_mode_valid,
540 .best_encoder = sun4i_tv_comp_best_encoder,
541}; 532};
542 533
543static enum drm_connector_status 534static enum drm_connector_status
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index b59c3bf0df44..a177a42a9849 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -93,7 +93,7 @@ static int tegra_atomic_commit(struct drm_device *drm,
93 * the software side now. 93 * the software side now.
94 */ 94 */
95 95
96 drm_atomic_helper_swap_state(drm, state); 96 drm_atomic_helper_swap_state(state, true);
97 97
98 if (nonblock) 98 if (nonblock)
99 tegra_atomic_schedule(tegra, state); 99 tegra_atomic_schedule(tegra, state);
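drm_atomic_helper_swap_state() changed signature in this cycle: the drm_device argument is gone (it is reachable as state->dev) and a stall flag was added. A one-call sketch of the new form:

static void swap_state_example(struct drm_atomic_state *state)
{
        /* true: stall until earlier commits touching the same
         * objects have finished their hardware update */
        drm_atomic_helper_swap_state(state, true);
}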
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index f52d6cb24ff5..0ddcce1b420d 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -239,8 +239,6 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output);
239void tegra_output_exit(struct tegra_output *output); 239void tegra_output_exit(struct tegra_output *output);
240 240
241int tegra_output_connector_get_modes(struct drm_connector *connector); 241int tegra_output_connector_get_modes(struct drm_connector *connector);
242struct drm_encoder *
243tegra_output_connector_best_encoder(struct drm_connector *connector);
244enum drm_connector_status 242enum drm_connector_status
245tegra_output_connector_detect(struct drm_connector *connector, bool force); 243tegra_output_connector_detect(struct drm_connector *connector, bool force);
246void tegra_output_connector_destroy(struct drm_connector *connector); 244void tegra_output_connector_destroy(struct drm_connector *connector);
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index d1239ebc190f..099cccb2fbcb 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -794,7 +794,6 @@ tegra_dsi_connector_mode_valid(struct drm_connector *connector,
794static const struct drm_connector_helper_funcs tegra_dsi_connector_helper_funcs = { 794static const struct drm_connector_helper_funcs tegra_dsi_connector_helper_funcs = {
795 .get_modes = tegra_output_connector_get_modes, 795 .get_modes = tegra_output_connector_get_modes,
796 .mode_valid = tegra_dsi_connector_mode_valid, 796 .mode_valid = tegra_dsi_connector_mode_valid,
797 .best_encoder = tegra_output_connector_best_encoder,
798}; 797};
799 798
800static const struct drm_encoder_funcs tegra_dsi_encoder_funcs = { 799static const struct drm_encoder_funcs tegra_dsi_encoder_funcs = {
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index b7ef4929e347..2fdb8796443e 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -806,7 +806,6 @@ static const struct drm_connector_helper_funcs
806tegra_hdmi_connector_helper_funcs = { 806tegra_hdmi_connector_helper_funcs = {
807 .get_modes = tegra_output_connector_get_modes, 807 .get_modes = tegra_output_connector_get_modes,
808 .mode_valid = tegra_hdmi_connector_mode_valid, 808 .mode_valid = tegra_hdmi_connector_mode_valid,
809 .best_encoder = tegra_output_connector_best_encoder,
810}; 809};
811 810
812static const struct drm_encoder_funcs tegra_hdmi_encoder_funcs = { 811static const struct drm_encoder_funcs tegra_hdmi_encoder_funcs = {
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index 46664b622270..1480f6aaffe4 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -42,14 +42,6 @@ int tegra_output_connector_get_modes(struct drm_connector *connector)
42 return err; 42 return err;
43} 43}
44 44
45struct drm_encoder *
46tegra_output_connector_best_encoder(struct drm_connector *connector)
47{
48 struct tegra_output *output = connector_to_output(connector);
49
50 return &output->encoder;
51}
52
53enum drm_connector_status 45enum drm_connector_status
54tegra_output_connector_detect(struct drm_connector *connector, bool force) 46tegra_output_connector_detect(struct drm_connector *connector, bool force)
55{ 47{
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index e246334e0252..a131b44e2d6f 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -112,7 +112,6 @@ tegra_rgb_connector_mode_valid(struct drm_connector *connector,
112static const struct drm_connector_helper_funcs tegra_rgb_connector_helper_funcs = { 112static const struct drm_connector_helper_funcs tegra_rgb_connector_helper_funcs = {
113 .get_modes = tegra_output_connector_get_modes, 113 .get_modes = tegra_output_connector_get_modes,
114 .mode_valid = tegra_rgb_connector_mode_valid, 114 .mode_valid = tegra_rgb_connector_mode_valid,
115 .best_encoder = tegra_output_connector_best_encoder,
116}; 115};
117 116
118static const struct drm_encoder_funcs tegra_rgb_encoder_funcs = { 117static const struct drm_encoder_funcs tegra_rgb_encoder_funcs = {
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 757c6e8603af..34958d71284b 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -1087,7 +1087,6 @@ tegra_sor_connector_mode_valid(struct drm_connector *connector,
1087static const struct drm_connector_helper_funcs tegra_sor_connector_helper_funcs = { 1087static const struct drm_connector_helper_funcs tegra_sor_connector_helper_funcs = {
1088 .get_modes = tegra_sor_connector_get_modes, 1088 .get_modes = tegra_sor_connector_get_modes,
1089 .mode_valid = tegra_sor_connector_mode_valid, 1089 .mode_valid = tegra_sor_connector_mode_valid,
1090 .best_encoder = tegra_output_connector_best_encoder,
1091}; 1090};
1092 1091
1093static const struct drm_encoder_funcs tegra_sor_encoder_funcs = { 1092static const struct drm_encoder_funcs tegra_sor_encoder_funcs = {
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 709bc903524d..d27809372d54 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -541,7 +541,6 @@ static struct drm_driver tilcdc_driver = {
541 .load = tilcdc_load, 541 .load = tilcdc_load,
542 .unload = tilcdc_unload, 542 .unload = tilcdc_unload,
543 .lastclose = tilcdc_lastclose, 543 .lastclose = tilcdc_lastclose,
544 .set_busid = drm_platform_set_busid,
545 .irq_handler = tilcdc_irq, 544 .irq_handler = tilcdc_irq,
546 .irq_preinstall = tilcdc_irq_preinstall, 545 .irq_preinstall = tilcdc_irq_preinstall,
547 .irq_postinstall = tilcdc_irq_postinstall, 546 .irq_postinstall = tilcdc_irq_postinstall,
@@ -549,7 +548,7 @@ static struct drm_driver tilcdc_driver = {
549 .get_vblank_counter = drm_vblank_no_hw_counter, 548 .get_vblank_counter = drm_vblank_no_hw_counter,
550 .enable_vblank = tilcdc_enable_vblank, 549 .enable_vblank = tilcdc_enable_vblank,
551 .disable_vblank = tilcdc_disable_vblank, 550 .disable_vblank = tilcdc_disable_vblank,
552 .gem_free_object = drm_gem_cma_free_object, 551 .gem_free_object_unlocked = drm_gem_cma_free_object,
553 .gem_vm_ops = &drm_gem_cma_vm_ops, 552 .gem_vm_ops = &drm_gem_cma_vm_ops,
554 .dumb_create = drm_gem_cma_dumb_create, 553 .dumb_create = drm_gem_cma_dumb_create,
555 .dumb_map_offset = drm_gem_cma_dumb_map_offset, 554 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index b87afee44995..f92ea9579674 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -376,7 +376,7 @@ static int udl_crtc_page_flip(struct drm_crtc *crtc,
376 376
377 spin_lock_irqsave(&dev->event_lock, flags); 377 spin_lock_irqsave(&dev->event_lock, flags);
378 if (event) 378 if (event)
379 drm_send_vblank_event(dev, 0, event); 379 drm_crtc_send_vblank_event(crtc, event);
380 spin_unlock_irqrestore(&dev->event_lock, flags); 380 spin_unlock_irqrestore(&dev->event_lock, flags);
381 crtc->primary->fb = fb; 381 crtc->primary->fb = fb;
382 382
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index e5a9d3aaf45f..59adcf8532dd 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -291,8 +291,6 @@ static void vc4_bo_cache_free_old(struct drm_device *dev)
291 291
292/* Called on the last userspace/kernel unreference of the BO. Returns 292/* Called on the last userspace/kernel unreference of the BO. Returns
293 * it to the BO cache if possible, otherwise frees it. 293 * it to the BO cache if possible, otherwise frees it.
294 *
295 * Note that this is called with the struct_mutex held.
296 */ 294 */
297void vc4_free_object(struct drm_gem_object *gem_bo) 295void vc4_free_object(struct drm_gem_object *gem_bo)
298{ 296{
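The comment goes away because freeing no longer happens under struct_mutex: vc4 switches to .gem_free_object_unlocked below, so the final unreference can be dropped from any context with the _unlocked helper. A sketch of the pairing, assuming a CMA-backed driver like this one:

static void drop_bo_reference(struct drm_gem_object *obj)
{
        /* no dev->struct_mutex needed with the unlocked hook */
        drm_gem_object_unreference_unlocked(obj);
}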
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 904d0754ad78..c82d468d178b 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -175,20 +175,22 @@ vc4_crtc_lut_load(struct drm_crtc *crtc)
175 HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_b[i]); 175 HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_b[i]);
176} 176}
177 177
178static void 178static int
179vc4_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, 179vc4_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
180 uint32_t start, uint32_t size) 180 uint32_t size)
181{ 181{
182 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); 182 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
183 u32 i; 183 u32 i;
184 184
185 for (i = start; i < start + size; i++) { 185 for (i = 0; i < size; i++) {
186 vc4_crtc->lut_r[i] = r[i] >> 8; 186 vc4_crtc->lut_r[i] = r[i] >> 8;
187 vc4_crtc->lut_g[i] = g[i] >> 8; 187 vc4_crtc->lut_g[i] = g[i] >> 8;
188 vc4_crtc->lut_b[i] = b[i] >> 8; 188 vc4_crtc->lut_b[i] = b[i] >> 8;
189 } 189 }
190 190
191 vc4_crtc_lut_load(crtc); 191 vc4_crtc_lut_load(crtc);
192
193 return 0;
192} 194}
193 195
194static u32 vc4_get_fifo_full_level(u32 format) 196static u32 vc4_get_fifo_full_level(u32 format)
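This follows the core API change to .gamma_set in this series: the hook now returns an error code and always updates the full table, so the start offset parameter is gone. The new prototype, sketched:

static int example_gamma_set(struct drm_crtc *crtc,
                             u16 *r, u16 *g, u16 *b, uint32_t size)
{
        uint32_t i;

        for (i = 0; i < size; i++) {
                /* scale each 16-bit LUT entry down to what the
                 * hardware stores, then load the LUT */
        }

        return 0;   /* report failures instead of swallowing them */
}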
@@ -395,6 +397,7 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
395 struct vc4_dev *vc4 = to_vc4_dev(dev); 397 struct vc4_dev *vc4 = to_vc4_dev(dev);
396 struct drm_plane *plane; 398 struct drm_plane *plane;
397 unsigned long flags; 399 unsigned long flags;
400 const struct drm_plane_state *plane_state;
398 u32 dlist_count = 0; 401 u32 dlist_count = 0;
399 int ret; 402 int ret;
400 403
@@ -404,18 +407,8 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
404 if (hweight32(state->connector_mask) > 1) 407 if (hweight32(state->connector_mask) > 1)
405 return -EINVAL; 408 return -EINVAL;
406 409
407 drm_atomic_crtc_state_for_each_plane(plane, state) { 410 drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, state)
408 struct drm_plane_state *plane_state =
409 state->state->plane_states[drm_plane_index(plane)];
410
411 /* plane might not have changed, in which case take
412 * current state:
413 */
414 if (!plane_state)
415 plane_state = plane->state;
416
417 dlist_count += vc4_plane_dlist_size(plane_state); 411 dlist_count += vc4_plane_dlist_size(plane_state);
418 }
419 412
420 dlist_count++; /* Account for SCALER_CTL0_END. */ 413 dlist_count++; /* Account for SCALER_CTL0_END. */
421 414
@@ -456,14 +449,6 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
456 449
457 WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size); 450 WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size);
458 451
459 HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
460 vc4_state->mm.start);
461
462 if (debug_dump_regs) {
463 DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
464 vc4_hvs_dump_state(dev);
465 }
466
467 if (crtc->state->event) { 452 if (crtc->state->event) {
468 unsigned long flags; 453 unsigned long flags;
469 454
@@ -473,8 +458,20 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
473 458
474 spin_lock_irqsave(&dev->event_lock, flags); 459 spin_lock_irqsave(&dev->event_lock, flags);
475 vc4_crtc->event = crtc->state->event; 460 vc4_crtc->event = crtc->state->event;
476 spin_unlock_irqrestore(&dev->event_lock, flags);
477 crtc->state->event = NULL; 461 crtc->state->event = NULL;
462
463 HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
464 vc4_state->mm.start);
465
466 spin_unlock_irqrestore(&dev->event_lock, flags);
467 } else {
468 HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
469 vc4_state->mm.start);
470 }
471
472 if (debug_dump_regs) {
473 DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
474 vc4_hvs_dump_state(dev);
478 } 475 }
479} 476}
480 477
@@ -500,12 +497,17 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
500{ 497{
501 struct drm_crtc *crtc = &vc4_crtc->base; 498 struct drm_crtc *crtc = &vc4_crtc->base;
502 struct drm_device *dev = crtc->dev; 499 struct drm_device *dev = crtc->dev;
500 struct vc4_dev *vc4 = to_vc4_dev(dev);
501 struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
502 u32 chan = vc4_crtc->channel;
503 unsigned long flags; 503 unsigned long flags;
504 504
505 spin_lock_irqsave(&dev->event_lock, flags); 505 spin_lock_irqsave(&dev->event_lock, flags);
506 if (vc4_crtc->event) { 506 if (vc4_crtc->event &&
507 (vc4_state->mm.start == HVS_READ(SCALER_DISPLACTX(chan)))) {
507 drm_crtc_send_vblank_event(crtc, vc4_crtc->event); 508 drm_crtc_send_vblank_event(crtc, vc4_crtc->event);
508 vc4_crtc->event = NULL; 509 vc4_crtc->event = NULL;
510 drm_crtc_vblank_put(crtc);
509 } 511 }
510 spin_unlock_irqrestore(&dev->event_lock, flags); 512 spin_unlock_irqrestore(&dev->event_lock, flags);
511} 513}
@@ -556,6 +558,7 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
556 spin_unlock_irqrestore(&dev->event_lock, flags); 558 spin_unlock_irqrestore(&dev->event_lock, flags);
557 } 559 }
558 560
561 drm_crtc_vblank_put(crtc);
559 drm_framebuffer_unreference(flip_state->fb); 562 drm_framebuffer_unreference(flip_state->fb);
560 kfree(flip_state); 563 kfree(flip_state);
561 564
@@ -598,6 +601,8 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
598 return ret; 601 return ret;
599 } 602 }
600 603
604 WARN_ON(drm_crtc_vblank_get(crtc) != 0);
605
601 /* Immediately update the plane's legacy fb pointer, so that later 606 /* Immediately update the plane's legacy fb pointer, so that later
602 * modeset prep sees the state that will be present when the semaphore 607 * modeset prep sees the state that will be present when the semaphore
603 * is released. 608 * is released.
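Taken together, the vc4_crtc.c changes make flip completion vblank-accurate: a vblank reference is taken when the flip is queued, the interrupt handler completes the event only once SCALER_DISPLACTX shows the hardware has actually latched the new display list, and the reference is dropped when the event is sent. The get/put pairing, sketched:

static int queue_flip(struct drm_crtc *crtc)
{
        /* keep vblank interrupts alive until the flip completes */
        WARN_ON(drm_crtc_vblank_get(crtc) != 0);
        /* ... queue the actual flip work ... */
        return 0;
}

static void flip_done(struct drm_crtc *crtc,
                      struct drm_pending_vblank_event *event)
{
        unsigned long flags;

        spin_lock_irqsave(&crtc->dev->event_lock, flags);
        drm_crtc_send_vblank_event(crtc, event);
        spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

        drm_crtc_vblank_put(crtc);  /* balance the _get at queue time */
}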
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index 9817dbfa4ac3..dba1114297e4 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -208,14 +208,6 @@ static int vc4_dpi_connector_get_modes(struct drm_connector *connector)
208 return 0; 208 return 0;
209} 209}
210 210
211static struct drm_encoder *
212vc4_dpi_connector_best_encoder(struct drm_connector *connector)
213{
214 struct vc4_dpi_connector *dpi_connector =
215 to_vc4_dpi_connector(connector);
216 return dpi_connector->encoder;
217}
218
219static const struct drm_connector_funcs vc4_dpi_connector_funcs = { 211static const struct drm_connector_funcs vc4_dpi_connector_funcs = {
220 .dpms = drm_atomic_helper_connector_dpms, 212 .dpms = drm_atomic_helper_connector_dpms,
221 .detect = vc4_dpi_connector_detect, 213 .detect = vc4_dpi_connector_detect,
@@ -228,7 +220,6 @@ static const struct drm_connector_funcs vc4_dpi_connector_funcs = {
228 220
229static const struct drm_connector_helper_funcs vc4_dpi_connector_helper_funcs = { 221static const struct drm_connector_helper_funcs vc4_dpi_connector_helper_funcs = {
230 .get_modes = vc4_dpi_connector_get_modes, 222 .get_modes = vc4_dpi_connector_get_modes,
231 .best_encoder = vc4_dpi_connector_best_encoder,
232}; 223};
233 224
234static struct drm_connector *vc4_dpi_connector_init(struct drm_device *dev, 225static struct drm_connector *vc4_dpi_connector_init(struct drm_device *dev,
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 3446ece21b4a..54d0471243dd 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -66,12 +66,12 @@ static const struct file_operations vc4_drm_fops = {
66}; 66};
67 67
68static const struct drm_ioctl_desc vc4_drm_ioctls[] = { 68static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
69 DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, 0), 69 DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, DRM_RENDER_ALLOW),
70 DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, 0), 70 DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, DRM_RENDER_ALLOW),
71 DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, 0), 71 DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, DRM_RENDER_ALLOW),
72 DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, 0), 72 DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, DRM_RENDER_ALLOW),
73 DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, 0), 73 DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, DRM_RENDER_ALLOW),
74 DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, 0), 74 DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, DRM_RENDER_ALLOW),
75 DRM_IOCTL_DEF_DRV(VC4_GET_HANG_STATE, vc4_get_hang_state_ioctl, 75 DRM_IOCTL_DEF_DRV(VC4_GET_HANG_STATE, vc4_get_hang_state_ioctl,
76 DRM_ROOT_ONLY), 76 DRM_ROOT_ONLY),
77}; 77};
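Replacing the 0 flags with DRM_RENDER_ALLOW exposes these ioctls on the render node (/dev/dri/renderD*), which carries no modesetting rights, so unprivileged clients can submit GPU work without opening the primary node; the hang-state dump stays DRM_ROOT_ONLY. Flag usage, sketched:

static const struct drm_ioctl_desc example_ioctls[] = {
        /* safe for unprivileged render-node clients */
        DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl,
                          DRM_RENDER_ALLOW),
        /* privileged debug interface: primary node, root only */
        DRM_IOCTL_DEF_DRV(VC4_GET_HANG_STATE, vc4_get_hang_state_ioctl,
                          DRM_ROOT_ONLY),
};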
@@ -91,7 +91,7 @@ static struct drm_driver vc4_drm_driver = {
91 91
92 .enable_vblank = vc4_enable_vblank, 92 .enable_vblank = vc4_enable_vblank,
93 .disable_vblank = vc4_disable_vblank, 93 .disable_vblank = vc4_disable_vblank,
94 .get_vblank_counter = drm_vblank_count, 94 .get_vblank_counter = drm_vblank_no_hw_counter,
95 95
96#if defined(CONFIG_DEBUG_FS) 96#if defined(CONFIG_DEBUG_FS)
97 .debugfs_init = vc4_debugfs_init, 97 .debugfs_init = vc4_debugfs_init,
@@ -99,7 +99,7 @@ static struct drm_driver vc4_drm_driver = {
99#endif 99#endif
100 100
101 .gem_create_object = vc4_create_object, 101 .gem_create_object = vc4_create_object,
102 .gem_free_object = vc4_free_object, 102 .gem_free_object_unlocked = vc4_free_object,
103 .gem_vm_ops = &drm_gem_cma_vm_ops, 103 .gem_vm_ops = &drm_gem_cma_vm_ops,
104 104
105 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 105 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
@@ -176,7 +176,6 @@ static int vc4_drm_bind(struct device *dev)
176{ 176{
177 struct platform_device *pdev = to_platform_device(dev); 177 struct platform_device *pdev = to_platform_device(dev);
178 struct drm_device *drm; 178 struct drm_device *drm;
179 struct drm_connector *connector;
180 struct vc4_dev *vc4; 179 struct vc4_dev *vc4;
181 int ret = 0; 180 int ret = 0;
182 181
@@ -211,22 +210,10 @@ static int vc4_drm_bind(struct device *dev)
211 if (ret < 0) 210 if (ret < 0)
212 goto unbind_all; 211 goto unbind_all;
213 212
214 /* Connector registration has to occur after DRM device
215 * registration, because it creates sysfs entries based on the
216 * DRM device.
217 */
218 list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
219 ret = drm_connector_register(connector);
220 if (ret)
221 goto unregister;
222 }
223
224 vc4_kms_load(drm); 213 vc4_kms_load(drm);
225 214
226 return 0; 215 return 0;
227 216
228unregister:
229 drm_dev_unregister(drm);
230unbind_all: 217unbind_all:
231 component_unbind_all(dev, drm); 218 component_unbind_all(dev, drm);
232gem_destroy: 219gem_destroy:
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 37cac59401d7..c799baabc008 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -469,7 +469,7 @@ int vc4_kms_load(struct drm_device *dev);
469struct drm_plane *vc4_plane_init(struct drm_device *dev, 469struct drm_plane *vc4_plane_init(struct drm_device *dev,
470 enum drm_plane_type type); 470 enum drm_plane_type type);
471u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist); 471u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
472u32 vc4_plane_dlist_size(struct drm_plane_state *state); 472u32 vc4_plane_dlist_size(const struct drm_plane_state *state);
473void vc4_plane_async_set_fb(struct drm_plane *plane, 473void vc4_plane_async_set_fb(struct drm_plane *plane,
474 struct drm_framebuffer *fb); 474 struct drm_framebuffer *fb);
475 475
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 46899d6de675..6155e8aca1c6 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -53,10 +53,8 @@ vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state)
53{ 53{
54 unsigned int i; 54 unsigned int i;
55 55
56 mutex_lock(&dev->struct_mutex);
57 for (i = 0; i < state->user_state.bo_count; i++) 56 for (i = 0; i < state->user_state.bo_count; i++)
58 drm_gem_object_unreference(state->bo[i]); 57 drm_gem_object_unreference_unlocked(state->bo[i]);
59 mutex_unlock(&dev->struct_mutex);
60 58
61 kfree(state); 59 kfree(state);
62} 60}
@@ -687,11 +685,9 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
687 struct vc4_dev *vc4 = to_vc4_dev(dev); 685 struct vc4_dev *vc4 = to_vc4_dev(dev);
688 unsigned i; 686 unsigned i;
689 687
690 /* Need the struct lock for drm_gem_object_unreference(). */
691 mutex_lock(&dev->struct_mutex);
692 if (exec->bo) { 688 if (exec->bo) {
693 for (i = 0; i < exec->bo_count; i++) 689 for (i = 0; i < exec->bo_count; i++)
694 drm_gem_object_unreference(&exec->bo[i]->base); 690 drm_gem_object_unreference_unlocked(&exec->bo[i]->base);
695 kfree(exec->bo); 691 kfree(exec->bo);
696 } 692 }
697 693
@@ -699,9 +695,8 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
699 struct vc4_bo *bo = list_first_entry(&exec->unref_list, 695 struct vc4_bo *bo = list_first_entry(&exec->unref_list,
700 struct vc4_bo, unref_head); 696 struct vc4_bo, unref_head);
701 list_del(&bo->unref_head); 697 list_del(&bo->unref_head);
702 drm_gem_object_unreference(&bo->base.base); 698 drm_gem_object_unreference_unlocked(&bo->base.base);
703 } 699 }
704 mutex_unlock(&dev->struct_mutex);
705 700
706 mutex_lock(&vc4->power_lock); 701 mutex_lock(&vc4->power_lock);
707 if (--vc4->power_refcount == 0) 702 if (--vc4->power_refcount == 0)
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index fd2644d231ff..68df91c3f860 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -208,14 +208,6 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
208 return ret; 208 return ret;
209} 209}
210 210
211static struct drm_encoder *
212vc4_hdmi_connector_best_encoder(struct drm_connector *connector)
213{
214 struct vc4_hdmi_connector *hdmi_connector =
215 to_vc4_hdmi_connector(connector);
216 return hdmi_connector->encoder;
217}
218
219static const struct drm_connector_funcs vc4_hdmi_connector_funcs = { 211static const struct drm_connector_funcs vc4_hdmi_connector_funcs = {
220 .dpms = drm_atomic_helper_connector_dpms, 212 .dpms = drm_atomic_helper_connector_dpms,
221 .detect = vc4_hdmi_connector_detect, 213 .detect = vc4_hdmi_connector_detect,
@@ -228,7 +220,6 @@ static const struct drm_connector_funcs vc4_hdmi_connector_funcs = {
228 220
229static const struct drm_connector_helper_funcs vc4_hdmi_connector_helper_funcs = { 221static const struct drm_connector_helper_funcs vc4_hdmi_connector_helper_funcs = {
230 .get_modes = vc4_hdmi_connector_get_modes, 222 .get_modes = vc4_hdmi_connector_get_modes,
231 .best_encoder = vc4_hdmi_connector_best_encoder,
232}; 223};
233 224
234static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev, 225static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev,
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index cb37751bc99f..9a217fd025f3 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -111,16 +111,26 @@ static int vc4_atomic_commit(struct drm_device *dev,
111 int i; 111 int i;
112 uint64_t wait_seqno = 0; 112 uint64_t wait_seqno = 0;
113 struct vc4_commit *c; 113 struct vc4_commit *c;
114 struct drm_plane *plane;
115 struct drm_plane_state *new_state;
114 116
115 c = commit_init(state); 117 c = commit_init(state);
116 if (!c) 118 if (!c)
117 return -ENOMEM; 119 return -ENOMEM;
118 120
119 /* Make sure that any outstanding modesets have finished. */ 121 /* Make sure that any outstanding modesets have finished. */
120 ret = down_interruptible(&vc4->async_modeset); 122 if (nonblock) {
121 if (ret) { 123 ret = down_trylock(&vc4->async_modeset);
122 kfree(c); 124 if (ret) {
123 return ret; 125 kfree(c);
126 return -EBUSY;
127 }
128 } else {
129 ret = down_interruptible(&vc4->async_modeset);
130 if (ret) {
131 kfree(c);
132 return ret;
133 }
124 } 134 }
125 135
126 ret = drm_atomic_helper_prepare_planes(dev, state); 136 ret = drm_atomic_helper_prepare_planes(dev, state);
@@ -130,13 +140,7 @@ static int vc4_atomic_commit(struct drm_device *dev,
130 return ret; 140 return ret;
131 } 141 }
132 142
133 for (i = 0; i < dev->mode_config.num_total_plane; i++) { 143 for_each_plane_in_state(state, plane, new_state, i) {
134 struct drm_plane *plane = state->planes[i];
135 struct drm_plane_state *new_state = state->plane_states[i];
136
137 if (!plane)
138 continue;
139
140 if ((plane->state->fb != new_state->fb) && new_state->fb) { 144 if ((plane->state->fb != new_state->fb) && new_state->fb) {
141 struct drm_gem_cma_object *cma_bo = 145 struct drm_gem_cma_object *cma_bo =
142 drm_fb_cma_get_gem_obj(new_state->fb, 0); 146 drm_fb_cma_get_gem_obj(new_state->fb, 0);
@@ -152,7 +156,7 @@ static int vc4_atomic_commit(struct drm_device *dev,
152 * the software side now. 156 * the software side now.
153 */ 157 */
154 158
155 drm_atomic_helper_swap_state(dev, state); 159 drm_atomic_helper_swap_state(state, true);
156 160
157 /* 161 /*
158 * Everything below can be run asynchronously without the need to grab 162 * Everything below can be run asynchronously without the need to grab
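Two independent fixes sit in this file: a nonblocking commit must not sleep on the async_modeset semaphore, so it uses down_trylock() and returns -EBUSY, and the open-coded plane walk is replaced by the for_each_plane_in_state() iterator, which skips untouched planes by construction. The semaphore logic, isolated as a sketch:

static int grab_modeset_sem(struct semaphore *sem, bool nonblock)
{
        if (nonblock)
                /* down_trylock() returns nonzero when it fails */
                return down_trylock(sem) ? -EBUSY : 0;

        return down_interruptible(sem);
}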
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 4037b52fde31..5d2c3d9fd17a 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -690,9 +690,10 @@ u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist)
690 return vc4_state->dlist_count; 690 return vc4_state->dlist_count;
691} 691}
692 692
693u32 vc4_plane_dlist_size(struct drm_plane_state *state) 693u32 vc4_plane_dlist_size(const struct drm_plane_state *state)
694{ 694{
695 struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); 695 const struct vc4_plane_state *vc4_state =
696 container_of(state, typeof(*vc4_state), base);
696 697
697 return vc4_state->dlist_count; 698 return vc4_state->dlist_count;
698} 699}
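The open-coded container_of() lets vc4_plane_dlist_size() take a const state: with typeof(*vc4_state), the recovered pointer keeps the caller's const qualifier, which the cast inside the to_vc4_plane_state() helper would discard. Sketched:

static u32 dlist_size(const struct drm_plane_state *state)
{
        /* container_of() with typeof() preserves const here */
        const struct vc4_plane_state *vc4_state =
                container_of(state, typeof(*vc4_state), base);

        return vc4_state->dlist_count;
}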
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index 6163b95c5411..f99eece4cc97 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -341,6 +341,10 @@
341#define SCALER_DISPLACT0 0x00000030 341#define SCALER_DISPLACT0 0x00000030
342#define SCALER_DISPLACT1 0x00000034 342#define SCALER_DISPLACT1 0x00000034
343#define SCALER_DISPLACT2 0x00000038 343#define SCALER_DISPLACT2 0x00000038
344#define SCALER_DISPLACTX(x) (SCALER_DISPLACT0 + \
345 (x) * (SCALER_DISPLACT1 - \
346 SCALER_DISPLACT0))
347
344#define SCALER_DISPCTRL0 0x00000040 348#define SCALER_DISPCTRL0 0x00000040
345# define SCALER_DISPCTRLX_ENABLE BIT(31) 349# define SCALER_DISPCTRLX_ENABLE BIT(31)
346# define SCALER_DISPCTRLX_RESET BIT(30) 350# define SCALER_DISPCTRLX_RESET BIT(30)
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 341f9be3dde6..35ea5d02a827 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -235,7 +235,7 @@ static const struct file_operations vgem_driver_fops = {
235 235
236static struct drm_driver vgem_driver = { 236static struct drm_driver vgem_driver = {
237 .driver_features = DRIVER_GEM, 237 .driver_features = DRIVER_GEM,
238 .gem_free_object = vgem_gem_free_object, 238 .gem_free_object_unlocked = vgem_gem_free_object,
239 .gem_vm_ops = &vgem_gem_vm_ops, 239 .gem_vm_ops = &vgem_gem_vm_ops,
240 .ioctls = vgem_ioctls, 240 .ioctls = vgem_ioctls,
241 .fops = &vgem_driver_fops, 241 .fops = &vgem_driver_fops,
@@ -260,8 +260,6 @@ static int __init vgem_init(void)
260 goto out; 260 goto out;
261 } 261 }
262 262
263 drm_dev_set_unique(vgem_device, "vgem");
264
265 ret = drm_dev_register(vgem_device, 0); 263 ret = drm_dev_register(vgem_device, 0);
266 264
267 if (ret) 265 if (ret)
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index 4f20742e7788..a04ef1c992d9 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -208,7 +208,7 @@ void via_reclaim_buffers_locked(struct drm_device *dev,
208 struct via_file_private *file_priv = file->driver_priv; 208 struct via_file_private *file_priv = file->driver_priv;
209 struct via_memblock *entry, *next; 209 struct via_memblock *entry, *next;
210 210
211 if (!(file->minor->master && file->master->lock.hw_lock)) 211 if (!(dev->master && file->master->lock.hw_lock))
212 return; 212 return;
213 213
214 drm_legacy_idlelock_take(&file->master->lock); 214 drm_legacy_idlelock_take(&file->master->lock);
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index d4305da88f44..ac758cdbc1bc 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -29,8 +29,8 @@
29#include <drm/drm_crtc_helper.h> 29#include <drm/drm_crtc_helper.h>
30#include <drm/drm_atomic_helper.h> 30#include <drm/drm_atomic_helper.h>
31 31
32#define XRES_MIN 320 32#define XRES_MIN 32
33#define YRES_MIN 200 33#define YRES_MIN 32
34 34
35#define XRES_DEF 1024 35#define XRES_DEF 1024
36#define YRES_DEF 768 36#define YRES_DEF 768
@@ -38,138 +38,11 @@
38#define XRES_MAX 8192 38#define XRES_MAX 8192
39#define YRES_MAX 8192 39#define YRES_MAX 8192
40 40
41static void
42virtio_gpu_hide_cursor(struct virtio_gpu_device *vgdev,
43 struct virtio_gpu_output *output)
44{
45 output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
46 output->cursor.resource_id = 0;
47 virtio_gpu_cursor_ping(vgdev, output);
48}
49
50static int virtio_gpu_crtc_cursor_set(struct drm_crtc *crtc,
51 struct drm_file *file_priv,
52 uint32_t handle,
53 uint32_t width,
54 uint32_t height,
55 int32_t hot_x, int32_t hot_y)
56{
57 struct virtio_gpu_device *vgdev = crtc->dev->dev_private;
58 struct virtio_gpu_output *output =
59 container_of(crtc, struct virtio_gpu_output, crtc);
60 struct drm_gem_object *gobj = NULL;
61 struct virtio_gpu_object *qobj = NULL;
62 struct virtio_gpu_fence *fence = NULL;
63 int ret = 0;
64
65 if (handle == 0) {
66 virtio_gpu_hide_cursor(vgdev, output);
67 return 0;
68 }
69
70 /* lookup the cursor */
71 gobj = drm_gem_object_lookup(file_priv, handle);
72 if (gobj == NULL)
73 return -ENOENT;
74
75 qobj = gem_to_virtio_gpu_obj(gobj);
76
77 if (!qobj->hw_res_handle) {
78 ret = -EINVAL;
79 goto out;
80 }
81
82 virtio_gpu_cmd_transfer_to_host_2d(vgdev, qobj->hw_res_handle, 0,
83 cpu_to_le32(64),
84 cpu_to_le32(64),
85 0, 0, &fence);
86 ret = virtio_gpu_object_reserve(qobj, false);
87 if (!ret) {
88 reservation_object_add_excl_fence(qobj->tbo.resv,
89 &fence->f);
90 fence_put(&fence->f);
91 virtio_gpu_object_unreserve(qobj);
92 virtio_gpu_object_wait(qobj, false);
93 }
94
95 output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
96 output->cursor.resource_id = cpu_to_le32(qobj->hw_res_handle);
97 output->cursor.hot_x = cpu_to_le32(hot_x);
98 output->cursor.hot_y = cpu_to_le32(hot_y);
99 virtio_gpu_cursor_ping(vgdev, output);
100 ret = 0;
101
102out:
103 drm_gem_object_unreference_unlocked(gobj);
104 return ret;
105}
106
107static int virtio_gpu_crtc_cursor_move(struct drm_crtc *crtc,
108 int x, int y)
109{
110 struct virtio_gpu_device *vgdev = crtc->dev->dev_private;
111 struct virtio_gpu_output *output =
112 container_of(crtc, struct virtio_gpu_output, crtc);
113
114 output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
115 output->cursor.pos.x = cpu_to_le32(x);
116 output->cursor.pos.y = cpu_to_le32(y);
117 virtio_gpu_cursor_ping(vgdev, output);
118 return 0;
119}
120
121static int virtio_gpu_page_flip(struct drm_crtc *crtc,
122 struct drm_framebuffer *fb,
123 struct drm_pending_vblank_event *event,
124 uint32_t flags)
125{
126 struct virtio_gpu_device *vgdev = crtc->dev->dev_private;
127 struct virtio_gpu_output *output =
128 container_of(crtc, struct virtio_gpu_output, crtc);
129 struct drm_plane *plane = crtc->primary;
130 struct virtio_gpu_framebuffer *vgfb;
131 struct virtio_gpu_object *bo;
132 unsigned long irqflags;
133 uint32_t handle;
134
135 plane->fb = fb;
136 vgfb = to_virtio_gpu_framebuffer(plane->fb);
137 bo = gem_to_virtio_gpu_obj(vgfb->obj);
138 handle = bo->hw_res_handle;
139
140 DRM_DEBUG("handle 0x%x%s, crtc %dx%d\n", handle,
141 bo->dumb ? ", dumb" : "",
142 crtc->mode.hdisplay, crtc->mode.vdisplay);
143 if (bo->dumb) {
144 virtio_gpu_cmd_transfer_to_host_2d
145 (vgdev, handle, 0,
146 cpu_to_le32(crtc->mode.hdisplay),
147 cpu_to_le32(crtc->mode.vdisplay),
148 0, 0, NULL);
149 }
150 virtio_gpu_cmd_set_scanout(vgdev, output->index, handle,
151 crtc->mode.hdisplay,
152 crtc->mode.vdisplay, 0, 0);
153 virtio_gpu_cmd_resource_flush(vgdev, handle, 0, 0,
154 crtc->mode.hdisplay,
155 crtc->mode.vdisplay);
156
157 if (event) {
158 spin_lock_irqsave(&crtc->dev->event_lock, irqflags);
159 drm_send_vblank_event(crtc->dev, -1, event);
160 spin_unlock_irqrestore(&crtc->dev->event_lock, irqflags);
161 }
162
163 return 0;
164}
165
166static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = { 41static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = {
167 .cursor_set2 = virtio_gpu_crtc_cursor_set,
168 .cursor_move = virtio_gpu_crtc_cursor_move,
169 .set_config = drm_atomic_helper_set_config, 42 .set_config = drm_atomic_helper_set_config,
170 .destroy = drm_crtc_cleanup, 43 .destroy = drm_crtc_cleanup,
171 44
172 .page_flip = virtio_gpu_page_flip, 45 .page_flip = drm_atomic_helper_page_flip,
173 .reset = drm_atomic_helper_crtc_reset, 46 .reset = drm_atomic_helper_crtc_reset,
174 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, 47 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
175 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, 48 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
@@ -267,6 +140,7 @@ static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
267 spin_lock_irqsave(&crtc->dev->event_lock, flags); 140 spin_lock_irqsave(&crtc->dev->event_lock, flags);
268 if (crtc->state->event) 141 if (crtc->state->event)
269 drm_crtc_send_vblank_event(crtc, crtc->state->event); 142 drm_crtc_send_vblank_event(crtc, crtc->state->event);
143 crtc->state->event = NULL;
270 spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 144 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
271} 145}
272 146
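Clearing crtc->state->event after sending it ensures the event is delivered exactly once; leaving a stale pointer behind would let a later flush or the atomic helpers complete (and free) it a second time. The pattern, isolated:

static void flush_event_once(struct drm_crtc *crtc)
{
        unsigned long flags;

        spin_lock_irqsave(&crtc->dev->event_lock, flags);
        if (crtc->state->event)
                drm_crtc_send_vblank_event(crtc, crtc->state->event);
        crtc->state->event = NULL;   /* never deliver it twice */
        spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}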
@@ -341,15 +215,6 @@ static int virtio_gpu_conn_mode_valid(struct drm_connector *connector,
341 return MODE_BAD; 215 return MODE_BAD;
342} 216}
343 217
344static struct drm_encoder*
345virtio_gpu_best_encoder(struct drm_connector *connector)
346{
347 struct virtio_gpu_output *virtio_gpu_output =
348 drm_connector_to_virtio_gpu_output(connector);
349
350 return &virtio_gpu_output->enc;
351}
352
353static const struct drm_encoder_helper_funcs virtio_gpu_enc_helper_funcs = { 218static const struct drm_encoder_helper_funcs virtio_gpu_enc_helper_funcs = {
354 .mode_set = virtio_gpu_enc_mode_set, 219 .mode_set = virtio_gpu_enc_mode_set,
355 .enable = virtio_gpu_enc_enable, 220 .enable = virtio_gpu_enc_enable,
@@ -359,7 +224,6 @@ static const struct drm_encoder_helper_funcs virtio_gpu_enc_helper_funcs = {
359static const struct drm_connector_helper_funcs virtio_gpu_conn_helper_funcs = { 224static const struct drm_connector_helper_funcs virtio_gpu_conn_helper_funcs = {
360 .get_modes = virtio_gpu_conn_get_modes, 225 .get_modes = virtio_gpu_conn_get_modes,
361 .mode_valid = virtio_gpu_conn_mode_valid, 226 .mode_valid = virtio_gpu_conn_mode_valid,
362 .best_encoder = virtio_gpu_best_encoder,
363}; 227};
364 228
365static enum drm_connector_status virtio_gpu_conn_detect( 229static enum drm_connector_status virtio_gpu_conn_detect(
@@ -406,7 +270,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
406 struct drm_connector *connector = &output->conn; 270 struct drm_connector *connector = &output->conn;
407 struct drm_encoder *encoder = &output->enc; 271 struct drm_encoder *encoder = &output->enc;
408 struct drm_crtc *crtc = &output->crtc; 272 struct drm_crtc *crtc = &output->crtc;
409 struct drm_plane *plane; 273 struct drm_plane *primary, *cursor;
410 274
411 output->index = index; 275 output->index = index;
412 if (index == 0) { 276 if (index == 0) {
@@ -415,13 +279,17 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
415 output->info.r.height = cpu_to_le32(YRES_DEF); 279 output->info.r.height = cpu_to_le32(YRES_DEF);
416 } 280 }
417 281
418 plane = virtio_gpu_plane_init(vgdev, index); 282 primary = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_PRIMARY, index);
419 if (IS_ERR(plane)) 283 if (IS_ERR(primary))
420 return PTR_ERR(plane); 284 return PTR_ERR(primary);
421 drm_crtc_init_with_planes(dev, crtc, plane, NULL, 285 cursor = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_CURSOR, index);
286 if (IS_ERR(cursor))
287 return PTR_ERR(cursor);
288 drm_crtc_init_with_planes(dev, crtc, primary, cursor,
422 &virtio_gpu_crtc_funcs, NULL); 289 &virtio_gpu_crtc_funcs, NULL);
423 drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs); 290 drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs);
424 plane->crtc = crtc; 291 primary->crtc = crtc;
292 cursor->crtc = crtc;
425 293
426 drm_connector_init(dev, connector, &virtio_gpu_connector_funcs, 294 drm_connector_init(dev, connector, &virtio_gpu_connector_funcs,
427 DRM_MODE_CONNECTOR_VIRTUAL); 295 DRM_MODE_CONNECTOR_VIRTUAL);
@@ -466,6 +334,24 @@ virtio_gpu_user_framebuffer_create(struct drm_device *dev,
466 return &virtio_gpu_fb->base; 334 return &virtio_gpu_fb->base;
467} 335}
468 336
337static void vgdev_atomic_commit_tail(struct drm_atomic_state *state)
338{
339 struct drm_device *dev = state->dev;
340
341 drm_atomic_helper_commit_modeset_disables(dev, state);
342 drm_atomic_helper_commit_modeset_enables(dev, state);
343 drm_atomic_helper_commit_planes(dev, state, true);
344
345 drm_atomic_helper_commit_hw_done(state);
346
347 drm_atomic_helper_wait_for_vblanks(dev, state);
348 drm_atomic_helper_cleanup_planes(dev, state);
349}
350
351struct drm_mode_config_helper_funcs virtio_mode_config_helpers = {
352 .atomic_commit_tail = vgdev_atomic_commit_tail,
353};
354
469static const struct drm_mode_config_funcs virtio_gpu_mode_funcs = { 355static const struct drm_mode_config_funcs virtio_gpu_mode_funcs = {
470 .fb_create = virtio_gpu_user_framebuffer_create, 356 .fb_create = virtio_gpu_user_framebuffer_create,
471 .atomic_check = drm_atomic_helper_check, 357 .atomic_check = drm_atomic_helper_check,
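The custom commit tail swaps the default helper order: modeset enables run before the plane commit, and active_only is true, so plane hooks fire only for planes sitting on a CRTC that ends up active, which matches how the host expects scanout updates. Annotated ordering, as a sketch:

static void commit_tail_order(struct drm_device *dev,
                              struct drm_atomic_state *state)
{
        drm_atomic_helper_commit_modeset_disables(dev, state);
        drm_atomic_helper_commit_modeset_enables(dev, state);
        /* active_only = true: skip planes on disabled CRTCs */
        drm_atomic_helper_commit_planes(dev, state, true);
        /* unblock the next commit before the slow vblank wait */
        drm_atomic_helper_commit_hw_done(state);
        drm_atomic_helper_wait_for_vblanks(dev, state);
        drm_atomic_helper_cleanup_planes(dev, state);
}

Dropping the (void *) cast when assigning mode_config.funcs also works now, presumably because the field takes a const pointer, so the const struct can be assigned directly.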
@@ -477,7 +363,8 @@ int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
477 int i; 363 int i;
478 364
479 drm_mode_config_init(vgdev->ddev); 365 drm_mode_config_init(vgdev->ddev);
480 vgdev->ddev->mode_config.funcs = (void *)&virtio_gpu_mode_funcs; 366 vgdev->ddev->mode_config.funcs = &virtio_gpu_mode_funcs;
367 vgdev->ddev->mode_config.helper_private = &virtio_mode_config_helpers;
481 368
482 /* modes will be validated against the framebuffer size */ 369 /* modes will be validated against the framebuffer size */
483 vgdev->ddev->mode_config.min_width = XRES_MIN; 370 vgdev->ddev->mode_config.min_width = XRES_MIN;
diff --git a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
index 88a39165edd5..7f0e93f87a55 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
@@ -27,16 +27,6 @@
27 27
28#include "virtgpu_drv.h" 28#include "virtgpu_drv.h"
29 29
30int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master)
31{
32 struct pci_dev *pdev = dev->pdev;
33
34 if (pdev) {
35 return drm_pci_set_busid(dev, master);
36 }
37 return 0;
38}
39
40static void virtio_pci_kick_out_firmware_fb(struct pci_dev *pci_dev) 30static void virtio_pci_kick_out_firmware_fb(struct pci_dev *pci_dev)
41{ 31{
42 struct apertures_struct *ap; 32 struct apertures_struct *ap;
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 3cc7afa77a35..c13f70cfc461 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -117,7 +117,6 @@ static const struct file_operations virtio_gpu_driver_fops = {
117 117
118static struct drm_driver driver = { 118static struct drm_driver driver = {
119 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC, 119 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC,
120 .set_busid = drm_virtio_set_busid,
121 .load = virtio_gpu_driver_load, 120 .load = virtio_gpu_driver_load,
122 .unload = virtio_gpu_driver_unload, 121 .unload = virtio_gpu_driver_unload,
123 .open = virtio_gpu_driver_open, 122 .open = virtio_gpu_driver_open,
@@ -143,7 +142,7 @@ static struct drm_driver driver = {
143 .gem_prime_vunmap = virtgpu_gem_prime_vunmap, 142 .gem_prime_vunmap = virtgpu_gem_prime_vunmap,
144 .gem_prime_mmap = virtgpu_gem_prime_mmap, 143 .gem_prime_mmap = virtgpu_gem_prime_mmap,
145 144
146 .gem_free_object = virtio_gpu_gem_free_object, 145 .gem_free_object_unlocked = virtio_gpu_gem_free_object,
147 .gem_open_object = virtio_gpu_gem_object_open, 146 .gem_open_object = virtio_gpu_gem_object_open,
148 .gem_close_object = virtio_gpu_gem_object_close, 147 .gem_close_object = virtio_gpu_gem_object_close,
149 .fops = &virtio_gpu_driver_fops, 148 .fops = &virtio_gpu_driver_fops,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 0a54f43f846a..b18ef3111f0c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -33,6 +33,7 @@
33 33
34#include <drm/drmP.h> 34#include <drm/drmP.h>
35#include <drm/drm_gem.h> 35#include <drm/drm_gem.h>
36#include <drm/drm_atomic.h>
36#include <drm/drm_crtc_helper.h> 37#include <drm/drm_crtc_helper.h>
37#include <ttm/ttm_bo_api.h> 38#include <ttm/ttm_bo_api.h>
38#include <ttm/ttm_bo_driver.h> 39#include <ttm/ttm_bo_driver.h>
@@ -48,7 +49,6 @@
48#define DRIVER_PATCHLEVEL 1 49#define DRIVER_PATCHLEVEL 1
49 50
50/* virtgpu_drm_bus.c */ 51/* virtgpu_drm_bus.c */
51int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master);
52int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev); 52int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev);
53 53
54struct virtio_gpu_object { 54struct virtio_gpu_object {
@@ -335,6 +335,7 @@ void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev);
335 335
336/* virtio_gpu_plane.c */ 336/* virtio_gpu_plane.c */
337struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev, 337struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
338 enum drm_plane_type type,
338 int index); 339 int index);
339 340
340/* virtio_gpu_ttm.c */ 341/* virtio_gpu_ttm.c */
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 70b44a2345ab..925ca25209df 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -38,6 +38,10 @@ static const uint32_t virtio_gpu_formats[] = {
38 DRM_FORMAT_ABGR8888, 38 DRM_FORMAT_ABGR8888,
39}; 39};
40 40
41static const uint32_t virtio_gpu_cursor_formats[] = {
42 DRM_FORMAT_ARGB8888,
43};
44
41static void virtio_gpu_plane_destroy(struct drm_plane *plane) 45static void virtio_gpu_plane_destroy(struct drm_plane *plane)
42{ 46{
43 kfree(plane); 47 kfree(plane);
@@ -58,16 +62,22 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
58 return 0; 62 return 0;
59} 63}
60 64
61static void virtio_gpu_plane_atomic_update(struct drm_plane *plane, 65static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
62 struct drm_plane_state *old_state) 66 struct drm_plane_state *old_state)
63{ 67{
64 struct drm_device *dev = plane->dev; 68 struct drm_device *dev = plane->dev;
65 struct virtio_gpu_device *vgdev = dev->dev_private; 69 struct virtio_gpu_device *vgdev = dev->dev_private;
66 struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(plane->crtc); 70 struct virtio_gpu_output *output = NULL;
67 struct virtio_gpu_framebuffer *vgfb; 71 struct virtio_gpu_framebuffer *vgfb;
68 struct virtio_gpu_object *bo; 72 struct virtio_gpu_object *bo;
69 uint32_t handle; 73 uint32_t handle;
70 74
75 if (plane->state->crtc)
76 output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
77 if (old_state->crtc)
78 output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
79 WARN_ON(!output);
80
71 if (plane->state->fb) { 81 if (plane->state->fb) {
72 vgfb = to_virtio_gpu_framebuffer(plane->state->fb); 82 vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
73 bo = gem_to_virtio_gpu_obj(vgfb->obj); 83 bo = gem_to_virtio_gpu_obj(vgfb->obj);
@@ -75,55 +85,149 @@ static void virtio_gpu_plane_atomic_update(struct drm_plane *plane,
75 if (bo->dumb) { 85 if (bo->dumb) {
76 virtio_gpu_cmd_transfer_to_host_2d 86 virtio_gpu_cmd_transfer_to_host_2d
77 (vgdev, handle, 0, 87 (vgdev, handle, 0,
78 cpu_to_le32(plane->state->crtc_w), 88 cpu_to_le32(plane->state->src_w >> 16),
79 cpu_to_le32(plane->state->crtc_h), 89 cpu_to_le32(plane->state->src_h >> 16),
80 plane->state->crtc_x, plane->state->crtc_y, NULL); 90 plane->state->src_x >> 16,
91 plane->state->src_y >> 16, NULL);
81 } 92 }
82 } else { 93 } else {
83 handle = 0; 94 handle = 0;
84 } 95 }
85 96
86 DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d\n", handle, 97 DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n", handle,
87 plane->state->crtc_w, plane->state->crtc_h, 98 plane->state->crtc_w, plane->state->crtc_h,
88 plane->state->crtc_x, plane->state->crtc_y); 99 plane->state->crtc_x, plane->state->crtc_y,
100 plane->state->src_w >> 16,
101 plane->state->src_h >> 16,
102 plane->state->src_x >> 16,
103 plane->state->src_y >> 16);
89 virtio_gpu_cmd_set_scanout(vgdev, output->index, handle, 104 virtio_gpu_cmd_set_scanout(vgdev, output->index, handle,
90 plane->state->crtc_w, 105 plane->state->src_w >> 16,
91 plane->state->crtc_h, 106 plane->state->src_h >> 16,
92 plane->state->crtc_x, 107 plane->state->src_x >> 16,
93 plane->state->crtc_y); 108 plane->state->src_y >> 16);
94 virtio_gpu_cmd_resource_flush(vgdev, handle, 109 virtio_gpu_cmd_resource_flush(vgdev, handle,
95 plane->state->crtc_x, 110 plane->state->src_x >> 16,
96 plane->state->crtc_y, 111 plane->state->src_y >> 16,
97 plane->state->crtc_w, 112 plane->state->src_w >> 16,
98 plane->state->crtc_h); 113 plane->state->src_h >> 16);
99} 114}
100 115
116static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
117 struct drm_plane_state *old_state)
118{
119 struct drm_device *dev = plane->dev;
120 struct virtio_gpu_device *vgdev = dev->dev_private;
121 struct virtio_gpu_output *output = NULL;
122 struct virtio_gpu_framebuffer *vgfb;
123 struct virtio_gpu_fence *fence = NULL;
124 struct virtio_gpu_object *bo = NULL;
125 uint32_t handle;
126 int ret = 0;
101 127
102static const struct drm_plane_helper_funcs virtio_gpu_plane_helper_funcs = { 128 if (plane->state->crtc)
129 output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
130 if (old_state->crtc)
131 output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
132 WARN_ON(!output);
133
134 if (plane->state->fb) {
135 vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
136 bo = gem_to_virtio_gpu_obj(vgfb->obj);
137 handle = bo->hw_res_handle;
138 } else {
139 handle = 0;
140 }
141
142 if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
143 /* new cursor -- update & wait */
144 virtio_gpu_cmd_transfer_to_host_2d
145 (vgdev, handle, 0,
146 cpu_to_le32(plane->state->crtc_w),
147 cpu_to_le32(plane->state->crtc_h),
148 0, 0, &fence);
149 ret = virtio_gpu_object_reserve(bo, false);
150 if (!ret) {
151 reservation_object_add_excl_fence(bo->tbo.resv,
152 &fence->f);
153 fence_put(&fence->f);
154 fence = NULL;
155 virtio_gpu_object_unreserve(bo);
156 virtio_gpu_object_wait(bo, false);
157 }
158 }
159
160 if (plane->state->fb != old_state->fb) {
161 DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
162 plane->state->crtc_x,
163 plane->state->crtc_y,
164 plane->state->fb ? plane->state->fb->hot_x : 0,
165 plane->state->fb ? plane->state->fb->hot_y : 0);
166 output->cursor.hdr.type =
167 cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
168 output->cursor.resource_id = cpu_to_le32(handle);
169 if (plane->state->fb) {
170 output->cursor.hot_x =
171 cpu_to_le32(plane->state->fb->hot_x);
172 output->cursor.hot_y =
173 cpu_to_le32(plane->state->fb->hot_y);
174 } else {
175 output->cursor.hot_x = cpu_to_le32(0);
176 output->cursor.hot_y = cpu_to_le32(0);
177 }
178 } else {
179 DRM_DEBUG("move +%d+%d\n",
180 plane->state->crtc_x,
181 plane->state->crtc_y);
182 output->cursor.hdr.type =
183 cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
184 }
185 output->cursor.pos.x = cpu_to_le32(plane->state->crtc_x);
186 output->cursor.pos.y = cpu_to_le32(plane->state->crtc_y);
187 virtio_gpu_cursor_ping(vgdev, output);
188}
189
190static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
191 .atomic_check = virtio_gpu_plane_atomic_check,
192 .atomic_update = virtio_gpu_primary_plane_update,
193};
194
195static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
103 .atomic_check = virtio_gpu_plane_atomic_check, 196 .atomic_check = virtio_gpu_plane_atomic_check,
104 .atomic_update = virtio_gpu_plane_atomic_update, 197 .atomic_update = virtio_gpu_cursor_plane_update,
105}; 198};
106 199
107struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev, 200struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
201 enum drm_plane_type type,
108 int index) 202 int index)
109{ 203{
110 struct drm_device *dev = vgdev->ddev; 204 struct drm_device *dev = vgdev->ddev;
205 const struct drm_plane_helper_funcs *funcs;
111 struct drm_plane *plane; 206 struct drm_plane *plane;
112 int ret; 207 const uint32_t *formats;
208 int ret, nformats;
113 209
114 plane = kzalloc(sizeof(*plane), GFP_KERNEL); 210 plane = kzalloc(sizeof(*plane), GFP_KERNEL);
115 if (!plane) 211 if (!plane)
116 return ERR_PTR(-ENOMEM); 212 return ERR_PTR(-ENOMEM);
117 213
214 if (type == DRM_PLANE_TYPE_CURSOR) {
215 formats = virtio_gpu_cursor_formats;
216 nformats = ARRAY_SIZE(virtio_gpu_cursor_formats);
217 funcs = &virtio_gpu_cursor_helper_funcs;
218 } else {
219 formats = virtio_gpu_formats;
220 nformats = ARRAY_SIZE(virtio_gpu_formats);
221 funcs = &virtio_gpu_primary_helper_funcs;
222 }
118 ret = drm_universal_plane_init(dev, plane, 1 << index, 223 ret = drm_universal_plane_init(dev, plane, 1 << index,
119 &virtio_gpu_plane_funcs, 224 &virtio_gpu_plane_funcs,
120 virtio_gpu_formats, 225 formats, nformats,
121 ARRAY_SIZE(virtio_gpu_formats), 226 type, NULL);
122 DRM_PLANE_TYPE_PRIMARY, NULL);
123 if (ret) 227 if (ret)
124 goto err_plane_init; 228 goto err_plane_init;
125 229
126 drm_plane_helper_add(plane, &virtio_gpu_plane_helper_funcs); 230 drm_plane_helper_add(plane, funcs);
127 return plane; 231 return plane;
128 232
129err_plane_init: 233err_plane_init:
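
Editor's note on the src_w >> 16 shifts in the hunk above: in the DRM atomic API a plane's source rectangle (src_x/src_y/src_w/src_h) is carried in 16.16 fixed point, while the CRTC rectangle is in whole pixels, so the driver truncates the fractional part before sending coordinates to the host. A minimal standalone sketch of that conversion (fp1616_to_px is a hypothetical helper, not a DRM API):

#include <stdint.h>
#include <stdio.h>

/* Truncate a 16.16 fixed-point source coordinate to whole pixels,
 * exactly what the >> 16 shifts in the hunk above do. */
static uint32_t fp1616_to_px(uint32_t v)
{
	return v >> 16;
}

int main(void)
{
	uint32_t src_w = 1024u << 16;            /* 1024.0 in 16.16 */
	uint32_t src_h = (768u << 16) | 0x8000;  /* 768.5: fraction dropped */

	printf("%u x %u\n", fp1616_to_px(src_w), fp1616_to_px(src_h));
	return 0;
}
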
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 67cebb23c940..aa04fb0159a7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -293,13 +293,10 @@ static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
293 struct vmw_cmdbuf_man *man = header->man; 293 struct vmw_cmdbuf_man *man = header->man;
294 u32 val; 294 u32 val;
295 295
296 if (sizeof(header->handle) > 4) 296 val = upper_32_bits(header->handle);
297 val = (header->handle >> 32);
298 else
299 val = 0;
300 vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val); 297 vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);
301 298
302 val = (header->handle & 0xFFFFFFFFULL); 299 val = lower_32_bits(header->handle);
303 val |= header->cb_context & SVGA_CB_CONTEXT_MASK; 300 val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
304 vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val); 301 vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);
305 302
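
Editor's note: the open-coded sizeof(header->handle) > 4 test above is replaced by the kernel's upper_32_bits()/lower_32_bits() helpers, which split a 64-bit value into halves without risking an undefined 32-bit shift. A standalone sketch of their behavior (macro bodies are simplified mirrors of the kernel's):

#include <stdint.h>
#include <stdio.h>

/* Simplified equivalents of the kernel helpers; the double shift in
 * upper_32_bits() avoids an undefined shift-by-32 on 32-bit types. */
#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))
#define lower_32_bits(n) ((uint32_t)((n) & 0xffffffffULL))

int main(void)
{
	uint64_t handle = 0x1234567890abcdefULL;

	printf("high 0x%08x low 0x%08x\n",
	       upper_32_bits(handle), lower_32_bits(handle));
	return 0;
}
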
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 9fcd8200d485..60646644bef3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1049,7 +1049,7 @@ static struct vmw_master *vmw_master_check(struct drm_device *dev,
1049 if (unlikely(ret != 0)) 1049 if (unlikely(ret != 0))
1050 return ERR_PTR(-ERESTARTSYS); 1050 return ERR_PTR(-ERESTARTSYS);
1051 1051
1052 if (file_priv->is_master) { 1052 if (drm_is_current_master(file_priv)) {
1053 mutex_unlock(&dev->master_mutex); 1053 mutex_unlock(&dev->master_mutex);
1054 return NULL; 1054 return NULL;
1055 } 1055 }
@@ -1228,8 +1228,7 @@ static int vmw_master_set(struct drm_device *dev,
1228} 1228}
1229 1229
1230static void vmw_master_drop(struct drm_device *dev, 1230static void vmw_master_drop(struct drm_device *dev,
1231 struct drm_file *file_priv, 1231 struct drm_file *file_priv)
1232 bool from_release)
1233{ 1232{
1234 struct vmw_private *dev_priv = vmw_priv(dev); 1233 struct vmw_private *dev_priv = vmw_priv(dev);
1235 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); 1234 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 1980e2a28265..9a90f824814e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -32,6 +32,7 @@
32#include <drm/drmP.h> 32#include <drm/drmP.h>
33#include <drm/vmwgfx_drm.h> 33#include <drm/vmwgfx_drm.h>
34#include <drm/drm_hashtab.h> 34#include <drm/drm_hashtab.h>
35#include <drm/drm_auth.h>
35#include <linux/suspend.h> 36#include <linux/suspend.h>
36#include <drm/ttm/ttm_bo_driver.h> 37#include <drm/ttm/ttm_bo_driver.h>
37#include <drm/ttm/ttm_object.h> 38#include <drm/ttm/ttm_object.h>
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index e959df6ede83..26ac8e80a478 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -46,7 +46,7 @@ struct vmw_fence_manager {
46 bool goal_irq_on; /* Protected by @goal_irq_mutex */ 46 bool goal_irq_on; /* Protected by @goal_irq_mutex */
47 bool seqno_valid; /* Protected by @lock, and may not be set to true 47 bool seqno_valid; /* Protected by @lock, and may not be set to true
48 without the @goal_irq_mutex held. */ 48 without the @goal_irq_mutex held. */
49 unsigned ctx; 49 u64 ctx;
50}; 50};
51 51
52struct vmw_user_fence { 52struct vmw_user_fence {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 55231cce73a0..8a69d4da40b5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1404,9 +1404,9 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
1404 return 0; 1404 return 0;
1405} 1405}
1406 1406
1407void vmw_du_crtc_gamma_set(struct drm_crtc *crtc, 1407int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
1408 u16 *r, u16 *g, u16 *b, 1408 u16 *r, u16 *g, u16 *b,
1409 uint32_t start, uint32_t size) 1409 uint32_t size)
1410{ 1410{
1411 struct vmw_private *dev_priv = vmw_priv(crtc->dev); 1411 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
1412 int i; 1412 int i;
@@ -1418,6 +1418,8 @@ void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
1418 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8); 1418 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
1419 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8); 1419 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
1420 } 1420 }
1421
1422 return 0;
1421} 1423}
1422 1424
1423int vmw_du_connector_dpms(struct drm_connector *connector, int mode) 1425int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 57203212c501..ff4803c107bc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -195,9 +195,9 @@ struct vmw_display_unit {
195void vmw_du_cleanup(struct vmw_display_unit *du); 195void vmw_du_cleanup(struct vmw_display_unit *du);
196void vmw_du_crtc_save(struct drm_crtc *crtc); 196void vmw_du_crtc_save(struct drm_crtc *crtc);
197void vmw_du_crtc_restore(struct drm_crtc *crtc); 197void vmw_du_crtc_restore(struct drm_crtc *crtc);
198void vmw_du_crtc_gamma_set(struct drm_crtc *crtc, 198int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
199 u16 *r, u16 *g, u16 *b, 199 u16 *r, u16 *g, u16 *b,
200 uint32_t start, uint32_t size); 200 uint32_t size);
201int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv, 201int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
202 uint32_t handle, uint32_t width, uint32_t height, 202 uint32_t handle, uint32_t width, uint32_t height,
203 int32_t hot_x, int32_t hot_y); 203 int32_t hot_x, int32_t hot_y);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index 6de283c8fa3e..f0374f9b56ca 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -28,6 +28,7 @@
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/kernel.h> 30#include <linux/kernel.h>
31#include <linux/frame.h>
31#include <asm/hypervisor.h> 32#include <asm/hypervisor.h>
32#include "drmP.h" 33#include "drmP.h"
33#include "vmwgfx_msg.h" 34#include "vmwgfx_msg.h"
@@ -194,7 +195,7 @@ static int vmw_send_msg(struct rpc_channel *channel, const char *msg)
194 195
195 return -EINVAL; 196 return -EINVAL;
196} 197}
197 198STACK_FRAME_NON_STANDARD(vmw_send_msg);
198 199
199 200
200/** 201/**
@@ -304,6 +305,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
304 305
305 return 0; 306 return 0;
306} 307}
308STACK_FRAME_NON_STANDARD(vmw_recv_msg);
307 309
308 310
309/** 311/**
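
Editor's note: STACK_FRAME_NON_STANDARD() is an objtool annotation; it suppresses stack-frame validation for a function whose inline assembly (here the VMware hypervisor message channel) cannot be modeled as a normal frame. A hedged sketch of the pattern, where the asm body is a placeholder and not the real backdoor call sequence:

#include <linux/frame.h>

static int example_hypercall(unsigned long cmd)
{
	unsigned long ret;

	/* Placeholder inline asm standing in for a hypervisor call
	 * sequence that objtool cannot verify as a normal stack frame. */
	asm volatile("mov %1, %0" : "=r" (ret) : "r" (cmd));
	return (int)ret;
}
/* Suppress objtool's frame validation for this one function. */
STACK_FRAME_NON_STANDARD(example_hypercall);
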
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index cbd7c986d926..2df216b39cc5 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -30,6 +30,7 @@
30 30
31#define pr_fmt(fmt) "vga_switcheroo: " fmt 31#define pr_fmt(fmt) "vga_switcheroo: " fmt
32 32
33#include <linux/apple-gmux.h>
33#include <linux/console.h> 34#include <linux/console.h>
34#include <linux/debugfs.h> 35#include <linux/debugfs.h>
35#include <linux/fb.h> 36#include <linux/fb.h>
@@ -308,7 +309,8 @@ static int register_client(struct pci_dev *pdev,
308 * 309 *
309 * Register vga client (GPU). Enable vga_switcheroo if another GPU and a 310 * Register vga client (GPU). Enable vga_switcheroo if another GPU and a
310 * handler have already registered. The power state of the client is assumed 311 * handler have already registered. The power state of the client is assumed
311 * to be ON. 312 * to be ON. Beforehand, vga_switcheroo_client_probe_defer() shall be called
313 * to ensure that all prerequisites are met.
312 * 314 *
313 * Return: 0 on success, -ENOMEM on memory allocation error. 315 * Return: 0 on success, -ENOMEM on memory allocation error.
314 */ 316 */
@@ -329,7 +331,8 @@ EXPORT_SYMBOL(vga_switcheroo_register_client);
329 * @id: client identifier 331 * @id: client identifier
330 * 332 *
331 * Register audio client (audio device on a GPU). The power state of the 333 * Register audio client (audio device on a GPU). The power state of the
332 * client is assumed to be ON. 334 * client is assumed to be ON. Beforehand, vga_switcheroo_client_probe_defer()
335 * shall be called to ensure that all prerequisites are met.
333 * 336 *
334 * Return: 0 on success, -ENOMEM on memory allocation error. 337 * Return: 0 on success, -ENOMEM on memory allocation error.
335 */ 338 */
@@ -376,6 +379,33 @@ find_active_client(struct list_head *head)
376} 379}
377 380
378/** 381/**
382 * vga_switcheroo_client_probe_defer() - whether to defer probing a given client
383 * @pdev: client pci device
384 *
385 * Determine whether any prerequisites are not fulfilled to probe a given
386 * client. Drivers shall invoke this early on in their ->probe callback
387 * and return %-EPROBE_DEFER if it evaluates to %true. Thou shalt not
388 * register the client ere thou hast called this.
389 *
390 * Return: %true if probing should be deferred, otherwise %false.
391 */
392bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev)
393{
394 if ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
395 /*
396 * apple-gmux is needed on pre-retina MacBook Pro
397 * to probe the panel if pdev is the inactive GPU.
398 */
399 if (apple_gmux_present() && pdev != vga_default_device() &&
400 !vgasr_priv.handler_flags)
401 return true;
402 }
403
404 return false;
405}
406EXPORT_SYMBOL(vga_switcheroo_client_probe_defer);
407
408/**
379 * vga_switcheroo_get_client_state() - obtain power state of a given client 409 * vga_switcheroo_get_client_state() - obtain power state of a given client
380 * @pdev: client pci device 410 * @pdev: client pci device
381 * 411 *
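
Editor's note: the intended use of the new helper is a one-line check at the very top of a GPU driver's probe routine, before the switcheroo client is registered. A minimal sketch, with hypothetical driver names:

static int mygpu_pci_probe(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	/* Defer if e.g. apple-gmux is required but has not probed yet. */
	if (vga_switcheroo_client_probe_defer(pdev))
		return -EPROBE_DEFER;

	/* ... normal setup, then vga_switcheroo_register_client() ... */
	return 0;
}
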
diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c
index aad8c162a825..0cd4f7216239 100644
--- a/drivers/hid/hid-elo.c
+++ b/drivers/hid/hid-elo.c
@@ -261,7 +261,7 @@ static void elo_remove(struct hid_device *hdev)
261 struct elo_priv *priv = hid_get_drvdata(hdev); 261 struct elo_priv *priv = hid_get_drvdata(hdev);
262 262
263 hid_hw_stop(hdev); 263 hid_hw_stop(hdev);
264 flush_workqueue(wq); 264 cancel_delayed_work_sync(&priv->work);
265 kfree(priv); 265 kfree(priv);
266} 266}
267 267
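
Editor's note: flush_workqueue() only drains work that is already queued; a self-rearming delayed work item can requeue itself right after the flush and then dereference freed memory. cancel_delayed_work_sync() waits for a running instance and prevents requeueing, giving the teardown order sketched below (struct mydev_priv is hypothetical):

static void mydev_remove(struct hid_device *hdev)
{
	struct mydev_priv *priv = hid_get_drvdata(hdev);

	hid_hw_stop(hdev);                      /* no new events can arrive */
	cancel_delayed_work_sync(&priv->work);  /* wait, and forbid requeue */
	kfree(priv);                            /* only now safe to free */
}
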
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index c741f5e50a66..95b7d61d9910 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1401,6 +1401,11 @@ static const struct hid_device_id mt_devices[] = {
1401 MT_USB_DEVICE(USB_VENDOR_ID_NOVATEK, 1401 MT_USB_DEVICE(USB_VENDOR_ID_NOVATEK,
1402 USB_DEVICE_ID_NOVATEK_PCT) }, 1402 USB_DEVICE_ID_NOVATEK_PCT) },
1403 1403
1404 /* Ntrig Panel */
1405 { .driver_data = MT_CLS_NSMU,
1406 HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
1407 USB_VENDOR_ID_NTRIG, 0x1b05) },
1408
1404 /* PixArt optical touch screen */ 1409 /* PixArt optical touch screen */
1405 { .driver_data = MT_CLS_INRANGE_CONTACTNUMBER, 1410 { .driver_data = MT_CLS_INRANGE_CONTACTNUMBER,
1406 MT_USB_DEVICE(USB_VENDOR_ID_PIXART, 1411 MT_USB_DEVICE(USB_VENDOR_ID_PIXART,
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 2f1ddca6f2e0..700145b15088 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -516,13 +516,13 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
516 goto inval; 516 goto inval;
517 } else if (uref->usage_index >= field->report_count) 517 } else if (uref->usage_index >= field->report_count)
518 goto inval; 518 goto inval;
519
520 else if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
521 (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
522 uref->usage_index + uref_multi->num_values > field->report_count))
523 goto inval;
524 } 519 }
525 520
521 if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
522 (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
523 uref->usage_index + uref_multi->num_values > field->report_count))
524 goto inval;
525
526 switch (cmd) { 526 switch (cmd) {
527 case HIDIOCGUSAGE: 527 case HIDIOCGUSAGE:
528 uref->value = field->value[uref->usage_index]; 528 uref->value = field->value[uref->usage_index];
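
Editor's note: hoisting the HIDIOCGUSAGES/HIDIOCSUSAGES bounds check out of the else-if chain makes it run for every report type rather than only one branch. The check caps the caller-supplied count and keeps index + count inside the report. A standalone sketch of the same validation, with an illustrative constant:

#include <stdbool.h>
#include <stdio.h>

#define MAX_MULTI_USAGES 1024  /* illustrative, mirrors HID_MAX_MULTI_USAGES */

static bool usages_in_range(unsigned int index, unsigned int count,
			    unsigned int report_count)
{
	/* Rejecting oversized counts first bounds index + count, so the
	 * sum cannot wrap for any sane report_count. */
	if (count > MAX_MULTI_USAGES)
		return false;
	return index + count <= report_count;
}

int main(void)
{
	printf("%d\n", usages_in_range(10, 5, 16));  /* 1: fits */
	printf("%d\n", usages_in_range(10, 8, 16));  /* 0: runs past end */
	return 0;
}
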
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index c43318d3416e..2ac87d553e22 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -35,6 +35,7 @@
35#include <linux/uaccess.h> 35#include <linux/uaccess.h>
36#include <linux/io.h> 36#include <linux/io.h>
37#include <linux/sched.h> 37#include <linux/sched.h>
38#include <linux/ctype.h>
38 39
39#include <linux/i8k.h> 40#include <linux/i8k.h>
40 41
@@ -66,11 +67,13 @@
66 67
67static DEFINE_MUTEX(i8k_mutex); 68static DEFINE_MUTEX(i8k_mutex);
68static char bios_version[4]; 69static char bios_version[4];
70static char bios_machineid[16];
69static struct device *i8k_hwmon_dev; 71static struct device *i8k_hwmon_dev;
70static u32 i8k_hwmon_flags; 72static u32 i8k_hwmon_flags;
71static uint i8k_fan_mult = I8K_FAN_MULT; 73static uint i8k_fan_mult = I8K_FAN_MULT;
72static uint i8k_pwm_mult; 74static uint i8k_pwm_mult;
73static uint i8k_fan_max = I8K_FAN_HIGH; 75static uint i8k_fan_max = I8K_FAN_HIGH;
76static bool disallow_fan_type_call;
74 77
75#define I8K_HWMON_HAVE_TEMP1 (1 << 0) 78#define I8K_HWMON_HAVE_TEMP1 (1 << 0)
76#define I8K_HWMON_HAVE_TEMP2 (1 << 1) 79#define I8K_HWMON_HAVE_TEMP2 (1 << 1)
@@ -94,13 +97,13 @@ module_param(ignore_dmi, bool, 0);
94MODULE_PARM_DESC(ignore_dmi, "Continue probing hardware even if DMI data does not match"); 97MODULE_PARM_DESC(ignore_dmi, "Continue probing hardware even if DMI data does not match");
95 98
96#if IS_ENABLED(CONFIG_I8K) 99#if IS_ENABLED(CONFIG_I8K)
97static bool restricted; 100static bool restricted = true;
98module_param(restricted, bool, 0); 101module_param(restricted, bool, 0);
99MODULE_PARM_DESC(restricted, "Allow fan control if SYS_ADMIN capability set"); 102MODULE_PARM_DESC(restricted, "Restrict fan control and serial number to CAP_SYS_ADMIN (default: 1)");
100 103
101static bool power_status; 104static bool power_status;
102module_param(power_status, bool, 0600); 105module_param(power_status, bool, 0600);
103MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k"); 106MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k (default: 0)");
104#endif 107#endif
105 108
106static uint fan_mult; 109static uint fan_mult;
@@ -235,14 +238,28 @@ static int i8k_get_fan_speed(int fan)
235/* 238/*
236 * Read the fan type. 239 * Read the fan type.
237 */ 240 */
238static int i8k_get_fan_type(int fan) 241static int _i8k_get_fan_type(int fan)
239{ 242{
240 struct smm_regs regs = { .eax = I8K_SMM_GET_FAN_TYPE, }; 243 struct smm_regs regs = { .eax = I8K_SMM_GET_FAN_TYPE, };
241 244
245 if (disallow_fan_type_call)
246 return -EINVAL;
247
242 regs.ebx = fan & 0xff; 248 regs.ebx = fan & 0xff;
243 return i8k_smm(&regs) ? : regs.eax & 0xff; 249 return i8k_smm(&regs) ? : regs.eax & 0xff;
244} 250}
245 251
252static int i8k_get_fan_type(int fan)
253{
254 /* I8K_SMM_GET_FAN_TYPE SMM call is expensive, so cache values */
255 static int types[2] = { INT_MIN, INT_MIN };
256
257 if (types[fan] == INT_MIN)
258 types[fan] = _i8k_get_fan_type(fan);
259
260 return types[fan];
261}
262
246/* 263/*
247 * Read the fan nominal rpm for specific fan speed. 264 * Read the fan nominal rpm for specific fan speed.
248 */ 265 */
@@ -387,14 +404,20 @@ i8k_ioctl_unlocked(struct file *fp, unsigned int cmd, unsigned long arg)
387 404
388 switch (cmd) { 405 switch (cmd) {
389 case I8K_BIOS_VERSION: 406 case I8K_BIOS_VERSION:
407 if (!isdigit(bios_version[0]) || !isdigit(bios_version[1]) ||
408 !isdigit(bios_version[2]))
409 return -EINVAL;
410
390 val = (bios_version[0] << 16) | 411 val = (bios_version[0] << 16) |
391 (bios_version[1] << 8) | bios_version[2]; 412 (bios_version[1] << 8) | bios_version[2];
392 break; 413 break;
393 414
394 case I8K_MACHINE_ID: 415 case I8K_MACHINE_ID:
395 memset(buff, 0, 16); 416 if (restricted && !capable(CAP_SYS_ADMIN))
396 strlcpy(buff, i8k_get_dmi_data(DMI_PRODUCT_SERIAL), 417 return -EPERM;
397 sizeof(buff)); 418
419 memset(buff, 0, sizeof(buff));
420 strlcpy(buff, bios_machineid, sizeof(buff));
398 break; 421 break;
399 422
400 case I8K_FN_STATUS: 423 case I8K_FN_STATUS:
@@ -511,7 +534,7 @@ static int i8k_proc_show(struct seq_file *seq, void *offset)
511 seq_printf(seq, "%s %s %s %d %d %d %d %d %d %d\n", 534 seq_printf(seq, "%s %s %s %d %d %d %d %d %d %d\n",
512 I8K_PROC_FMT, 535 I8K_PROC_FMT,
513 bios_version, 536 bios_version,
514 i8k_get_dmi_data(DMI_PRODUCT_SERIAL), 537 (restricted && !capable(CAP_SYS_ADMIN)) ? "-1" : bios_machineid,
515 cpu_temp, 538 cpu_temp,
516 left_fan, right_fan, left_speed, right_speed, 539 left_fan, right_fan, left_speed, right_speed,
517 ac_power, fn_key); 540 ac_power, fn_key);
@@ -718,6 +741,9 @@ static struct attribute *i8k_attrs[] = {
718static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr, 741static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
719 int index) 742 int index)
720{ 743{
744 if (disallow_fan_type_call &&
745 (index == 9 || index == 12))
746 return 0;
721 if (index >= 0 && index <= 1 && 747 if (index >= 0 && index <= 1 &&
722 !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1)) 748 !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1))
723 return 0; 749 return 0;
@@ -767,13 +793,17 @@ static int __init i8k_init_hwmon(void)
767 if (err >= 0) 793 if (err >= 0)
768 i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP4; 794 i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP4;
769 795
770 /* First fan attributes, if fan type is OK */ 796 /* First fan attributes, if fan status or type is OK */
771 err = i8k_get_fan_type(0); 797 err = i8k_get_fan_status(0);
798 if (err < 0)
799 err = i8k_get_fan_type(0);
772 if (err >= 0) 800 if (err >= 0)
773 i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN1; 801 i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN1;
774 802
775 /* Second fan attributes, if fan type is OK */ 803 /* Second fan attributes, if fan status or type is OK */
776 err = i8k_get_fan_type(1); 804 err = i8k_get_fan_status(1);
805 if (err < 0)
806 err = i8k_get_fan_type(1);
777 if (err >= 0) 807 if (err >= 0)
778 i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN2; 808 i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN2;
779 809
@@ -929,12 +959,14 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
929 959
930MODULE_DEVICE_TABLE(dmi, i8k_dmi_table); 960MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
931 961
932static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = { 962/*
 963 * On some machines, once I8K_SMM_GET_FAN_TYPE is issued, the CPU fan speed
 964 * starts going up and down randomly due to a bug in the Dell SMM or BIOS. Here
 965 * is a blacklist of affected Dell machines where we disallow that SMM call.
966 * See bug: https://bugzilla.kernel.org/show_bug.cgi?id=100121
967 */
968static struct dmi_system_id i8k_blacklist_fan_type_dmi_table[] __initdata = {
933 { 969 {
934 /*
935 * CPU fan speed going up and down on Dell Studio XPS 8000
936 * for unknown reasons.
937 */
938 .ident = "Dell Studio XPS 8000", 970 .ident = "Dell Studio XPS 8000",
939 .matches = { 971 .matches = {
940 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 972 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
@@ -942,16 +974,19 @@ static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
942 }, 974 },
943 }, 975 },
944 { 976 {
945 /*
946 * CPU fan speed going up and down on Dell Studio XPS 8100
947 * for unknown reasons.
948 */
949 .ident = "Dell Studio XPS 8100", 977 .ident = "Dell Studio XPS 8100",
950 .matches = { 978 .matches = {
951 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 979 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
952 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8100"), 980 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8100"),
953 }, 981 },
954 }, 982 },
983 {
984 .ident = "Dell Inspiron 580",
985 .matches = {
986 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
987 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Inspiron 580 "),
988 },
989 },
955 { } 990 { }
956}; 991};
957 992
@@ -966,8 +1001,7 @@ static int __init i8k_probe(void)
966 /* 1001 /*
967 * Get DMI information 1002 * Get DMI information
968 */ 1003 */
969 if (!dmi_check_system(i8k_dmi_table) || 1004 if (!dmi_check_system(i8k_dmi_table)) {
970 dmi_check_system(i8k_blacklist_dmi_table)) {
971 if (!ignore_dmi && !force) 1005 if (!ignore_dmi && !force)
972 return -ENODEV; 1006 return -ENODEV;
973 1007
@@ -978,8 +1012,13 @@ static int __init i8k_probe(void)
978 i8k_get_dmi_data(DMI_BIOS_VERSION)); 1012 i8k_get_dmi_data(DMI_BIOS_VERSION));
979 } 1013 }
980 1014
1015 if (dmi_check_system(i8k_blacklist_fan_type_dmi_table))
1016 disallow_fan_type_call = true;
1017
981 strlcpy(bios_version, i8k_get_dmi_data(DMI_BIOS_VERSION), 1018 strlcpy(bios_version, i8k_get_dmi_data(DMI_BIOS_VERSION),
982 sizeof(bios_version)); 1019 sizeof(bios_version));
1020 strlcpy(bios_machineid, i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
1021 sizeof(bios_machineid));
983 1022
984 /* 1023 /*
985 * Get SMM Dell signature 1024 * Get SMM Dell signature
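
Editor's note: the i8k_get_fan_type() change above is memoization with a sentinel: the expensive SMM call runs at most once per fan, and its result, error codes included, is cached in a static array. The shape of the pattern in standalone C (smm_query() stands in for the real SMM call):

#include <limits.h>
#include <stdio.h>

/* Stand-in for the expensive I8K_SMM_GET_FAN_TYPE call. */
static int smm_query(int fan)
{
	printf("expensive SMM call for fan %d\n", fan);
	return fan == 0 ? 2 : -22;  /* e.g. fan type 2, or -EINVAL */
}

static int get_fan_type_cached(int fan)
{
	/* INT_MIN marks "not queried yet"; real results (even errors)
	 * are cached so the SMM call happens at most once per fan. */
	static int types[2] = { INT_MIN, INT_MIN };

	if (types[fan] == INT_MIN)
		types[fan] = smm_query(fan);
	return types[fan];
}

int main(void)
{
	get_fan_type_cached(0);
	get_fan_type_cached(0);  /* served from cache, no second SMM call */
	return 0;
}
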
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
index eb97a9241d17..15aa49d082c4 100644
--- a/drivers/hwmon/fam15h_power.c
+++ b/drivers/hwmon/fam15h_power.c
@@ -172,9 +172,9 @@ static void do_read_registers_on_cu(void *_data)
172 */ 172 */
173static int read_registers(struct fam15h_power_data *data) 173static int read_registers(struct fam15h_power_data *data)
174{ 174{
175 int this_cpu, ret, cpu;
176 int core, this_core; 175 int core, this_core;
177 cpumask_var_t mask; 176 cpumask_var_t mask;
177 int ret, cpu;
178 178
179 ret = zalloc_cpumask_var(&mask, GFP_KERNEL); 179 ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
180 if (!ret) 180 if (!ret)
@@ -183,7 +183,6 @@ static int read_registers(struct fam15h_power_data *data)
183 memset(data->cu_on, 0, sizeof(int) * MAX_CUS); 183 memset(data->cu_on, 0, sizeof(int) * MAX_CUS);
184 184
185 get_online_cpus(); 185 get_online_cpus();
186 this_cpu = smp_processor_id();
187 186
188 /* 187 /*
189 * Choose the first online core of each compute unit, and then 188 * Choose the first online core of each compute unit, and then
@@ -205,12 +204,9 @@ static int read_registers(struct fam15h_power_data *data)
205 cpumask_set_cpu(cpumask_any(topology_sibling_cpumask(cpu)), mask); 204 cpumask_set_cpu(cpumask_any(topology_sibling_cpumask(cpu)), mask);
206 } 205 }
207 206
208 if (cpumask_test_cpu(this_cpu, mask)) 207 on_each_cpu_mask(mask, do_read_registers_on_cu, data, true);
209 do_read_registers_on_cu(data);
210 208
211 smp_call_function_many(mask, do_read_registers_on_cu, data, true);
212 put_online_cpus(); 209 put_online_cpus();
213
214 free_cpumask_var(mask); 210 free_cpumask_var(mask);
215 211
216 return 0; 212 return 0;
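
Editor's note: on_each_cpu_mask() subsumes the removed smp_call_function_many() plus the manual local call: it invokes the function on every CPU in the mask, including the calling CPU when it is set there, and with the final argument true it waits for all invocations to finish. Sketched as an annotated call fragment:

/*
 * Run do_read_registers_on_cu(data) on every CPU in 'mask'. Unlike
 * smp_call_function_many(), this also covers the calling CPU if it is
 * in the mask, so the separate cpumask_test_cpu() + direct-call dance
 * is no longer needed. The final 'true' waits for completion.
 */
on_each_cpu_mask(mask, do_read_registers_on_cu, data, true);
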
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index c9ff08dbe10c..e30a5939dc0d 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -375,7 +375,7 @@ struct lm90_data {
375 int kind; 375 int kind;
376 u32 flags; 376 u32 flags;
377 377
378 int update_interval; /* in milliseconds */ 378 unsigned int update_interval; /* in milliseconds */
379 379
380 u8 config_orig; /* Original configuration register value */ 380 u8 config_orig; /* Original configuration register value */
381 u8 convrate_orig; /* Original conversion rate register value */ 381 u8 convrate_orig; /* Original conversion rate register value */
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 847d1b5f2c13..688be9e060fc 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -300,13 +300,10 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
300 if (local_read(&drvdata->mode) == CS_MODE_SYSFS) { 300 if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
301 /* 301 /*
302 * The trace run will continue with the same allocated trace 302 * The trace run will continue with the same allocated trace
303 * buffer. As such zero-out the buffer so that we don't end 303 * buffer. The trace buffer is cleared in tmc_etr_enable_hw(),
304 * up with stale data. 304 * so we don't have to explicitly clear it. Also, since the
 305 * 305 * tracer is still enabled, drvdata::buf can't be NULL.
306 * Since the tracer is still enabled drvdata::buf
307 * can't be NULL.
308 */ 306 */
309 memset(drvdata->buf, 0, drvdata->size);
310 tmc_etr_enable_hw(drvdata); 307 tmc_etr_enable_hw(drvdata);
311 } else { 308 } else {
312 /* 309 /*
@@ -315,7 +312,7 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
315 */ 312 */
316 vaddr = drvdata->vaddr; 313 vaddr = drvdata->vaddr;
317 paddr = drvdata->paddr; 314 paddr = drvdata->paddr;
318 drvdata->buf = NULL; 315 drvdata->buf = drvdata->vaddr = NULL;
319 } 316 }
320 317
321 drvdata->reading = false; 318 drvdata->reading = false;
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 5443d03a1eec..d08d1ab9bba5 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -385,7 +385,6 @@ static int _coresight_build_path(struct coresight_device *csdev,
385 int i; 385 int i;
386 bool found = false; 386 bool found = false;
387 struct coresight_node *node; 387 struct coresight_node *node;
388 struct coresight_connection *conn;
389 388
390 /* An activated sink has been found. Enqueue the element */ 389 /* An activated sink has been found. Enqueue the element */
391 if ((csdev->type == CORESIGHT_DEV_TYPE_SINK || 390 if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
@@ -394,8 +393,9 @@ static int _coresight_build_path(struct coresight_device *csdev,
394 393
395 /* Not a sink - recursively explore each port found on this element */ 394 /* Not a sink - recursively explore each port found on this element */
396 for (i = 0; i < csdev->nr_outport; i++) { 395 for (i = 0; i < csdev->nr_outport; i++) {
397 conn = &csdev->conns[i]; 396 struct coresight_device *child_dev = csdev->conns[i].child_dev;
398 if (_coresight_build_path(conn->child_dev, path) == 0) { 397
398 if (child_dev && _coresight_build_path(child_dev, path) == 0) {
399 found = true; 399 found = true;
400 break; 400 break;
401 } 401 }
@@ -425,6 +425,7 @@ out:
425struct list_head *coresight_build_path(struct coresight_device *csdev) 425struct list_head *coresight_build_path(struct coresight_device *csdev)
426{ 426{
427 struct list_head *path; 427 struct list_head *path;
428 int rc;
428 429
429 path = kzalloc(sizeof(struct list_head), GFP_KERNEL); 430 path = kzalloc(sizeof(struct list_head), GFP_KERNEL);
430 if (!path) 431 if (!path)
@@ -432,9 +433,10 @@ struct list_head *coresight_build_path(struct coresight_device *csdev)
432 433
433 INIT_LIST_HEAD(path); 434 INIT_LIST_HEAD(path);
434 435
435 if (_coresight_build_path(csdev, path)) { 436 rc = _coresight_build_path(csdev, path);
437 if (rc) {
436 kfree(path); 438 kfree(path);
437 path = NULL; 439 return ERR_PTR(rc);
438 } 440 }
439 441
440 return path; 442 return path;
@@ -507,8 +509,9 @@ int coresight_enable(struct coresight_device *csdev)
507 goto out; 509 goto out;
508 510
509 path = coresight_build_path(csdev); 511 path = coresight_build_path(csdev);
510 if (!path) { 512 if (IS_ERR(path)) {
511 pr_err("building path(s) failed\n"); 513 pr_err("building path(s) failed\n");
514 ret = PTR_ERR(path);
512 goto out; 515 goto out;
513 } 516 }
514 517
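
Editor's note: coresight_build_path() now tells its callers why it failed by encoding the errno into the returned pointer via ERR_PTR(), which callers unpack with IS_ERR()/PTR_ERR(). A minimal producer sketch of the idiom (names hypothetical):

#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>

static struct list_head *build_path_sketch(bool ok)
{
	struct list_head *path = kzalloc(sizeof(*path), GFP_KERNEL);

	if (!path)
		return ERR_PTR(-ENOMEM);
	if (!ok) {
		kfree(path);
		return ERR_PTR(-ENODEV);  /* caller learns *why* it failed */
	}
	INIT_LIST_HEAD(path);
	return path;
}

/*
 * Caller side:
 *	path = build_path_sketch(ok);
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 */
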
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 64b1208bca5e..4a60ad214747 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -245,6 +245,13 @@ struct i801_priv {
245 struct platform_device *mux_pdev; 245 struct platform_device *mux_pdev;
246#endif 246#endif
247 struct platform_device *tco_pdev; 247 struct platform_device *tco_pdev;
248
249 /*
 250 * If set to true, the host controller registers are reserved for
251 * ACPI AML use. Protected by acpi_lock.
252 */
253 bool acpi_reserved;
254 struct mutex acpi_lock;
248}; 255};
249 256
250#define FEATURE_SMBUS_PEC (1 << 0) 257#define FEATURE_SMBUS_PEC (1 << 0)
@@ -718,6 +725,12 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
718 int ret = 0, xact = 0; 725 int ret = 0, xact = 0;
719 struct i801_priv *priv = i2c_get_adapdata(adap); 726 struct i801_priv *priv = i2c_get_adapdata(adap);
720 727
728 mutex_lock(&priv->acpi_lock);
729 if (priv->acpi_reserved) {
730 mutex_unlock(&priv->acpi_lock);
731 return -EBUSY;
732 }
733
721 pm_runtime_get_sync(&priv->pci_dev->dev); 734 pm_runtime_get_sync(&priv->pci_dev->dev);
722 735
723 hwpec = (priv->features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC) 736 hwpec = (priv->features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC)
@@ -820,6 +833,7 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
820out: 833out:
821 pm_runtime_mark_last_busy(&priv->pci_dev->dev); 834 pm_runtime_mark_last_busy(&priv->pci_dev->dev);
822 pm_runtime_put_autosuspend(&priv->pci_dev->dev); 835 pm_runtime_put_autosuspend(&priv->pci_dev->dev);
836 mutex_unlock(&priv->acpi_lock);
823 return ret; 837 return ret;
824} 838}
825 839
@@ -1257,6 +1271,83 @@ static void i801_add_tco(struct i801_priv *priv)
1257 priv->tco_pdev = pdev; 1271 priv->tco_pdev = pdev;
1258} 1272}
1259 1273
1274#ifdef CONFIG_ACPI
1275static acpi_status
1276i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
1277 u64 *value, void *handler_context, void *region_context)
1278{
1279 struct i801_priv *priv = handler_context;
1280 struct pci_dev *pdev = priv->pci_dev;
1281 acpi_status status;
1282
1283 /*
 1284 * Once BIOS AML code touches the OpRegion, we warn and inhibit any
1285 * further access from the driver itself. This device is now owned
1286 * by the system firmware.
1287 */
1288 mutex_lock(&priv->acpi_lock);
1289
1290 if (!priv->acpi_reserved) {
1291 priv->acpi_reserved = true;
1292
1293 dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n");
1294 dev_warn(&pdev->dev, "Driver SMBus register access inhibited\n");
1295
1296 /*
1297 * BIOS is accessing the host controller so prevent it from
1298 * suspending automatically from now on.
1299 */
1300 pm_runtime_get_sync(&pdev->dev);
1301 }
1302
1303 if ((function & ACPI_IO_MASK) == ACPI_READ)
1304 status = acpi_os_read_port(address, (u32 *)value, bits);
1305 else
1306 status = acpi_os_write_port(address, (u32)*value, bits);
1307
1308 mutex_unlock(&priv->acpi_lock);
1309
1310 return status;
1311}
1312
1313static int i801_acpi_probe(struct i801_priv *priv)
1314{
1315 struct acpi_device *adev;
1316 acpi_status status;
1317
1318 adev = ACPI_COMPANION(&priv->pci_dev->dev);
1319 if (adev) {
1320 status = acpi_install_address_space_handler(adev->handle,
1321 ACPI_ADR_SPACE_SYSTEM_IO, i801_acpi_io_handler,
1322 NULL, priv);
1323 if (ACPI_SUCCESS(status))
1324 return 0;
1325 }
1326
1327 return acpi_check_resource_conflict(&priv->pci_dev->resource[SMBBAR]);
1328}
1329
1330static void i801_acpi_remove(struct i801_priv *priv)
1331{
1332 struct acpi_device *adev;
1333
1334 adev = ACPI_COMPANION(&priv->pci_dev->dev);
1335 if (!adev)
1336 return;
1337
1338 acpi_remove_address_space_handler(adev->handle,
1339 ACPI_ADR_SPACE_SYSTEM_IO, i801_acpi_io_handler);
1340
1341 mutex_lock(&priv->acpi_lock);
1342 if (priv->acpi_reserved)
1343 pm_runtime_put(&priv->pci_dev->dev);
1344 mutex_unlock(&priv->acpi_lock);
1345}
1346#else
1347static inline int i801_acpi_probe(struct i801_priv *priv) { return 0; }
1348static inline void i801_acpi_remove(struct i801_priv *priv) { }
1349#endif
1350
1260static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) 1351static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
1261{ 1352{
1262 unsigned char temp; 1353 unsigned char temp;
@@ -1274,6 +1365,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
1274 priv->adapter.dev.parent = &dev->dev; 1365 priv->adapter.dev.parent = &dev->dev;
1275 ACPI_COMPANION_SET(&priv->adapter.dev, ACPI_COMPANION(&dev->dev)); 1366 ACPI_COMPANION_SET(&priv->adapter.dev, ACPI_COMPANION(&dev->dev));
1276 priv->adapter.retries = 3; 1367 priv->adapter.retries = 3;
1368 mutex_init(&priv->acpi_lock);
1277 1369
1278 priv->pci_dev = dev; 1370 priv->pci_dev = dev;
1279 switch (dev->device) { 1371 switch (dev->device) {
@@ -1336,10 +1428,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
1336 return -ENODEV; 1428 return -ENODEV;
1337 } 1429 }
1338 1430
1339 err = acpi_check_resource_conflict(&dev->resource[SMBBAR]); 1431 if (i801_acpi_probe(priv))
1340 if (err) {
1341 return -ENODEV; 1432 return -ENODEV;
1342 }
1343 1433
1344 err = pcim_iomap_regions(dev, 1 << SMBBAR, 1434 err = pcim_iomap_regions(dev, 1 << SMBBAR,
1345 dev_driver_string(&dev->dev)); 1435 dev_driver_string(&dev->dev));
@@ -1348,6 +1438,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
1348 "Failed to request SMBus region 0x%lx-0x%Lx\n", 1438 "Failed to request SMBus region 0x%lx-0x%Lx\n",
1349 priv->smba, 1439 priv->smba,
1350 (unsigned long long)pci_resource_end(dev, SMBBAR)); 1440 (unsigned long long)pci_resource_end(dev, SMBBAR));
1441 i801_acpi_remove(priv);
1351 return err; 1442 return err;
1352 } 1443 }
1353 1444
@@ -1412,6 +1503,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
1412 err = i2c_add_adapter(&priv->adapter); 1503 err = i2c_add_adapter(&priv->adapter);
1413 if (err) { 1504 if (err) {
1414 dev_err(&dev->dev, "Failed to add SMBus adapter\n"); 1505 dev_err(&dev->dev, "Failed to add SMBus adapter\n");
1506 i801_acpi_remove(priv);
1415 return err; 1507 return err;
1416 } 1508 }
1417 1509
@@ -1438,6 +1530,7 @@ static void i801_remove(struct pci_dev *dev)
1438 1530
1439 i801_del_mux(priv); 1531 i801_del_mux(priv);
1440 i2c_del_adapter(&priv->adapter); 1532 i2c_del_adapter(&priv->adapter);
1533 i801_acpi_remove(priv);
1441 pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg); 1534 pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
1442 1535
1443 platform_device_unregister(priv->tco_pdev); 1536 platform_device_unregister(priv->tco_pdev);
diff --git a/drivers/i2c/busses/i2c-octeon.c b/drivers/i2c/busses/i2c-octeon.c
index aa5f01efd826..30ae35146723 100644
--- a/drivers/i2c/busses/i2c-octeon.c
+++ b/drivers/i2c/busses/i2c-octeon.c
@@ -934,8 +934,15 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
934 return result; 934 return result;
935 935
936 for (i = 0; i < length; i++) { 936 for (i = 0; i < length; i++) {
937 /* for the last byte TWSI_CTL_AAK must not be set */ 937 /*
 938 if (i + 1 == length) 938 * For the last byte to be received, TWSI_CTL_AAK must not be set.
939 *
940 * A special case is I2C_M_RECV_LEN where we don't know the
 941 * additional length yet. If recv_len is set, we assume we're
942 * not reading the final byte and therefore need to set
943 * TWSI_CTL_AAK.
944 */
945 if ((i + 1 == length) && !(recv_len && i == 0))
939 final_read = true; 946 final_read = true;
940 947
941 /* clear iflg to allow next event */ 948 /* clear iflg to allow next event */
@@ -950,12 +957,8 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
950 957
951 data[i] = octeon_i2c_data_read(i2c); 958 data[i] = octeon_i2c_data_read(i2c);
952 if (recv_len && i == 0) { 959 if (recv_len && i == 0) {
953 if (data[i] > I2C_SMBUS_BLOCK_MAX + 1) { 960 if (data[i] > I2C_SMBUS_BLOCK_MAX + 1)
954 dev_err(i2c->dev,
955 "%s: read len > I2C_SMBUS_BLOCK_MAX %d\n",
956 __func__, data[i]);
957 return -EPROTO; 961 return -EPROTO;
958 }
959 length += data[i]; 962 length += data[i];
960 } 963 }
961 964
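
Editor's note: with I2C_M_RECV_LEN the first received byte announces how many more bytes follow, which is why the loop above must keep TWSI_CTL_AAK set while reading byte 0 even when length starts out as 1. A standalone sketch of the length bookkeeping; I2C_SMBUS_BLOCK_MAX is 32 in the kernel headers:

#include <stdio.h>

#define I2C_SMBUS_BLOCK_MAX 32

/* Simulate receiving a block: byte 0 carries the payload length. */
static int recv_block(const unsigned char *wire, int wire_len,
		      unsigned char *out)
{
	int i, length = 1;  /* we only know about the length byte so far */

	for (i = 0; i < length && i < wire_len; i++) {
		out[i] = wire[i];
		if (i == 0) {
			if (out[0] > I2C_SMBUS_BLOCK_MAX + 1)
				return -1;  /* -EPROTO in the driver */
			length += out[0];  /* now we know the real size */
		}
	}
	return length;
}

int main(void)
{
	unsigned char wire[] = { 3, 0xaa, 0xbb, 0xcc };
	unsigned char buf[I2C_SMBUS_BLOCK_MAX + 2];

	printf("total bytes: %d\n", recv_block(wire, 4, buf));  /* 4 */
	return 0;
}
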
diff --git a/drivers/i2c/muxes/i2c-mux-reg.c b/drivers/i2c/muxes/i2c-mux-reg.c
index 6773cadf7c9f..26e7c5187a58 100644
--- a/drivers/i2c/muxes/i2c-mux-reg.c
+++ b/drivers/i2c/muxes/i2c-mux-reg.c
@@ -260,6 +260,7 @@ static struct platform_driver i2c_mux_reg_driver = {
260 .remove = i2c_mux_reg_remove, 260 .remove = i2c_mux_reg_remove,
261 .driver = { 261 .driver = {
262 .name = "i2c-mux-reg", 262 .name = "i2c-mux-reg",
263 .of_match_table = of_match_ptr(i2c_mux_reg_of_match),
263 }, 264 },
264}; 265};
265 266
diff --git a/drivers/iio/accel/st_accel_buffer.c b/drivers/iio/accel/st_accel_buffer.c
index a1e642ee13d6..7fddc137e91e 100644
--- a/drivers/iio/accel/st_accel_buffer.c
+++ b/drivers/iio/accel/st_accel_buffer.c
@@ -91,7 +91,7 @@ static const struct iio_buffer_setup_ops st_accel_buffer_setup_ops = {
91 91
92int st_accel_allocate_ring(struct iio_dev *indio_dev) 92int st_accel_allocate_ring(struct iio_dev *indio_dev)
93{ 93{
94 return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time, 94 return iio_triggered_buffer_setup(indio_dev, NULL,
95 &st_sensors_trigger_handler, &st_accel_buffer_setup_ops); 95 &st_sensors_trigger_handler, &st_accel_buffer_setup_ops);
96} 96}
97 97
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index dc73f2d85e6d..4d95bfc4786c 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -741,6 +741,7 @@ static const struct iio_info accel_info = {
741static const struct iio_trigger_ops st_accel_trigger_ops = { 741static const struct iio_trigger_ops st_accel_trigger_ops = {
742 .owner = THIS_MODULE, 742 .owner = THIS_MODULE,
743 .set_trigger_state = ST_ACCEL_TRIGGER_SET_STATE, 743 .set_trigger_state = ST_ACCEL_TRIGGER_SET_STATE,
744 .validate_device = st_sensors_validate_device,
744}; 745};
745#define ST_ACCEL_TRIGGER_OPS (&st_accel_trigger_ops) 746#define ST_ACCEL_TRIGGER_OPS (&st_accel_trigger_ops)
746#else 747#else
diff --git a/drivers/iio/common/st_sensors/st_sensors_buffer.c b/drivers/iio/common/st_sensors/st_sensors_buffer.c
index c55898543a47..f1693dbebb8a 100644
--- a/drivers/iio/common/st_sensors/st_sensors_buffer.c
+++ b/drivers/iio/common/st_sensors/st_sensors_buffer.c
@@ -57,31 +57,20 @@ irqreturn_t st_sensors_trigger_handler(int irq, void *p)
57 struct iio_poll_func *pf = p; 57 struct iio_poll_func *pf = p;
58 struct iio_dev *indio_dev = pf->indio_dev; 58 struct iio_dev *indio_dev = pf->indio_dev;
59 struct st_sensor_data *sdata = iio_priv(indio_dev); 59 struct st_sensor_data *sdata = iio_priv(indio_dev);
60 s64 timestamp;
60 61
 61 /* If we have a status register, check if this IRQ came from us */ 62 /* If we do timestamping here, do it before reading the values */
62 if (sdata->sensor_settings->drdy_irq.addr_stat_drdy) { 63 if (sdata->hw_irq_trigger)
63 u8 status; 64 timestamp = sdata->hw_timestamp;
64 65 else
65 len = sdata->tf->read_byte(&sdata->tb, sdata->dev, 66 timestamp = iio_get_time_ns();
66 sdata->sensor_settings->drdy_irq.addr_stat_drdy,
67 &status);
68 if (len < 0)
69 dev_err(sdata->dev, "could not read channel status\n");
70
71 /*
72 * If this was not caused by any channels on this sensor,
73 * return IRQ_NONE
74 */
75 if (!(status & (u8)indio_dev->active_scan_mask[0]))
76 return IRQ_NONE;
77 }
78 67
79 len = st_sensors_get_buffer_element(indio_dev, sdata->buffer_data); 68 len = st_sensors_get_buffer_element(indio_dev, sdata->buffer_data);
80 if (len < 0) 69 if (len < 0)
81 goto st_sensors_get_buffer_element_error; 70 goto st_sensors_get_buffer_element_error;
82 71
83 iio_push_to_buffers_with_timestamp(indio_dev, sdata->buffer_data, 72 iio_push_to_buffers_with_timestamp(indio_dev, sdata->buffer_data,
84 pf->timestamp); 73 timestamp);
85 74
86st_sensors_get_buffer_element_error: 75st_sensors_get_buffer_element_error:
87 iio_trigger_notify_done(indio_dev->trig); 76 iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index dffe00692169..9e59c90f6a8d 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -363,6 +363,11 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
363 if (err < 0) 363 if (err < 0)
364 return err; 364 return err;
365 365
 366 /* Disable DRDY, this might still be enabled after reboot. */
367 err = st_sensors_set_dataready_irq(indio_dev, false);
368 if (err < 0)
369 return err;
370
366 if (sdata->current_fullscale) { 371 if (sdata->current_fullscale) {
367 err = st_sensors_set_fullscale(indio_dev, 372 err = st_sensors_set_fullscale(indio_dev,
368 sdata->current_fullscale->num); 373 sdata->current_fullscale->num);
@@ -424,6 +429,9 @@ int st_sensors_set_dataready_irq(struct iio_dev *indio_dev, bool enable)
424 else 429 else
425 drdy_mask = sdata->sensor_settings->drdy_irq.mask_int2; 430 drdy_mask = sdata->sensor_settings->drdy_irq.mask_int2;
426 431
432 /* Flag to the poll function that the hardware trigger is in use */
433 sdata->hw_irq_trigger = enable;
434
427 /* Enable/Disable the interrupt generator for data ready. */ 435 /* Enable/Disable the interrupt generator for data ready. */
428 err = st_sensors_write_data_with_mask(indio_dev, 436 err = st_sensors_write_data_with_mask(indio_dev,
429 sdata->sensor_settings->drdy_irq.addr, 437 sdata->sensor_settings->drdy_irq.addr,
diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c
index da72279fcf99..296e4ff19ae8 100644
--- a/drivers/iio/common/st_sensors/st_sensors_trigger.c
+++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c
@@ -17,6 +17,73 @@
17#include <linux/iio/common/st_sensors.h> 17#include <linux/iio/common/st_sensors.h>
18#include "st_sensors_core.h" 18#include "st_sensors_core.h"
19 19
20/**
21 * st_sensors_irq_handler() - top half of the IRQ-based triggers
22 * @irq: irq number
23 * @p: private handler data
24 */
25irqreturn_t st_sensors_irq_handler(int irq, void *p)
26{
27 struct iio_trigger *trig = p;
28 struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
29 struct st_sensor_data *sdata = iio_priv(indio_dev);
30
31 /* Get the time stamp as close in time as possible */
32 sdata->hw_timestamp = iio_get_time_ns();
33 return IRQ_WAKE_THREAD;
34}
35
36/**
37 * st_sensors_irq_thread() - bottom half of the IRQ-based triggers
38 * @irq: irq number
39 * @p: private handler data
40 */
41irqreturn_t st_sensors_irq_thread(int irq, void *p)
42{
43 struct iio_trigger *trig = p;
44 struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
45 struct st_sensor_data *sdata = iio_priv(indio_dev);
46 int ret;
47
48 /*
49 * If this trigger is backed by a hardware interrupt and we have a
50 * status register, check if this IRQ came from us
51 */
52 if (sdata->sensor_settings->drdy_irq.addr_stat_drdy) {
53 u8 status;
54
55 ret = sdata->tf->read_byte(&sdata->tb, sdata->dev,
56 sdata->sensor_settings->drdy_irq.addr_stat_drdy,
57 &status);
58 if (ret < 0) {
59 dev_err(sdata->dev, "could not read channel status\n");
60 goto out_poll;
61 }
62 /*
 63 * the lower bits of .active_scan_mask[0] are directly mapped
64 * to the channels on the sensor: either bit 0 for
65 * one-dimensional sensors, or e.g. x,y,z for accelerometers,
 66 * gyroscopes or magnetometers. No sensor uses more than 3
67 * channels, so cut the other status bits here.
68 */
69 status &= 0x07;
70
71 /*
72 * If this was not caused by any channels on this sensor,
73 * return IRQ_NONE
74 */
75 if (!indio_dev->active_scan_mask)
76 return IRQ_NONE;
77 if (!(status & (u8)indio_dev->active_scan_mask[0]))
78 return IRQ_NONE;
79 }
80
81out_poll:
82 /* It's our IRQ: proceed to handle the register polling */
83 iio_trigger_poll_chained(p);
84 return IRQ_HANDLED;
85}
86
20int st_sensors_allocate_trigger(struct iio_dev *indio_dev, 87int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
21 const struct iio_trigger_ops *trigger_ops) 88 const struct iio_trigger_ops *trigger_ops)
22{ 89{
@@ -30,6 +97,10 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
30 return -ENOMEM; 97 return -ENOMEM;
31 } 98 }
32 99
100 iio_trigger_set_drvdata(sdata->trig, indio_dev);
101 sdata->trig->ops = trigger_ops;
102 sdata->trig->dev.parent = sdata->dev;
103
33 irq = sdata->get_irq_data_ready(indio_dev); 104 irq = sdata->get_irq_data_ready(indio_dev);
34 irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq)); 105 irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
35 /* 106 /*
@@ -77,9 +148,12 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
77 sdata->sensor_settings->drdy_irq.addr_stat_drdy) 148 sdata->sensor_settings->drdy_irq.addr_stat_drdy)
78 irq_trig |= IRQF_SHARED; 149 irq_trig |= IRQF_SHARED;
79 150
80 err = request_threaded_irq(irq, 151 /* Let's create an interrupt thread masking the hard IRQ here */
81 iio_trigger_generic_data_rdy_poll, 152 irq_trig |= IRQF_ONESHOT;
82 NULL, 153
154 err = request_threaded_irq(sdata->get_irq_data_ready(indio_dev),
155 st_sensors_irq_handler,
156 st_sensors_irq_thread,
83 irq_trig, 157 irq_trig,
84 sdata->trig->name, 158 sdata->trig->name,
85 sdata->trig); 159 sdata->trig);
@@ -88,10 +162,6 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
88 goto iio_trigger_free; 162 goto iio_trigger_free;
89 } 163 }
90 164
91 iio_trigger_set_drvdata(sdata->trig, indio_dev);
92 sdata->trig->ops = trigger_ops;
93 sdata->trig->dev.parent = sdata->dev;
94
95 err = iio_trigger_register(sdata->trig); 165 err = iio_trigger_register(sdata->trig);
96 if (err < 0) { 166 if (err < 0) {
97 dev_err(&indio_dev->dev, "failed to register iio trigger.\n"); 167 dev_err(&indio_dev->dev, "failed to register iio trigger.\n");
@@ -119,6 +189,18 @@ void st_sensors_deallocate_trigger(struct iio_dev *indio_dev)
119} 189}
120EXPORT_SYMBOL(st_sensors_deallocate_trigger); 190EXPORT_SYMBOL(st_sensors_deallocate_trigger);
121 191
192int st_sensors_validate_device(struct iio_trigger *trig,
193 struct iio_dev *indio_dev)
194{
195 struct iio_dev *indio = iio_trigger_get_drvdata(trig);
196
197 if (indio != indio_dev)
198 return -EINVAL;
199
200 return 0;
201}
202EXPORT_SYMBOL(st_sensors_validate_device);
203
122MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>"); 204MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
123MODULE_DESCRIPTION("STMicroelectronics ST-sensors trigger"); 205MODULE_DESCRIPTION("STMicroelectronics ST-sensors trigger");
124MODULE_LICENSE("GPL v2"); 206MODULE_LICENSE("GPL v2");
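
Editor's note: the trigger now follows the classic threaded-IRQ split: a hard-IRQ top half that only captures a timestamp and returns IRQ_WAKE_THREAD, and a thread that does the slow bus I/O, with IRQF_ONESHOT keeping the line masked until the thread returns. A minimal sketch of the registration; the device type and names are hypothetical, and the last statement is a fragment from a probe path:

static irqreturn_t my_top_half(int irq, void *p)
{
	struct my_trigger *t = p;

	t->timestamp = ktime_get_ns();  /* as close to the event as possible */
	return IRQ_WAKE_THREAD;         /* defer the slow work to the thread */
}

static irqreturn_t my_thread(int irq, void *p)
{
	/* Safe to sleep here: poll status over I2C/SPI, push data, ... */
	return IRQ_HANDLED;
}

/* IRQF_ONESHOT keeps the line masked until my_thread() has returned. */
err = request_threaded_irq(irq, my_top_half, my_thread,
			   irq_trig | IRQF_ONESHOT, "my-trigger", my_trig);
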
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index e63b957c985f..f7c71da42f15 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -247,7 +247,7 @@ config MCP4922
247 247
248config STX104 248config STX104
249 tristate "Apex Embedded Systems STX104 DAC driver" 249 tristate "Apex Embedded Systems STX104 DAC driver"
250 depends on X86 && ISA 250 depends on X86 && ISA_BUS_API
251 help 251 help
252 Say yes here to build support for the 2-channel DAC on the Apex 252 Say yes here to build support for the 2-channel DAC on the Apex
253 Embedded Systems STX104 integrated analog PC/104 card. The base port 253 Embedded Systems STX104 integrated analog PC/104 card. The base port
diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c
index 948f600e7059..69bde5909854 100644
--- a/drivers/iio/dac/ad5592r-base.c
+++ b/drivers/iio/dac/ad5592r-base.c
@@ -525,7 +525,7 @@ static int ad5592r_alloc_channels(struct ad5592r_state *st)
525 525
526 device_for_each_child_node(st->dev, child) { 526 device_for_each_child_node(st->dev, child) {
527 ret = fwnode_property_read_u32(child, "reg", &reg); 527 ret = fwnode_property_read_u32(child, "reg", &reg);
528 if (ret || reg > ARRAY_SIZE(st->channel_modes)) 528 if (ret || reg >= ARRAY_SIZE(st->channel_modes))
529 continue; 529 continue;
530 530
531 ret = fwnode_property_read_u32(child, "adi,mode", &tmp); 531 ret = fwnode_property_read_u32(child, "adi,mode", &tmp);
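
Editor's note: the ad5592r fix above is the textbook ARRAY_SIZE off-by-one: valid indices run from 0 to ARRAY_SIZE-1, so the rejection test must use >=, not >. Demonstrated standalone:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	int modes[8] = { 0 };
	unsigned int reg = 8;  /* one past the end */

	if (!(reg > ARRAY_SIZE(modes)))
		printf("old check lets reg %u through -- out of bounds\n", reg);
	if (reg >= ARRAY_SIZE(modes))
		printf("new check rejects reg %u\n", reg);
	return 0;
}
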
diff --git a/drivers/iio/gyro/st_gyro_buffer.c b/drivers/iio/gyro/st_gyro_buffer.c
index d67b17b6a7aa..a5377044e42f 100644
--- a/drivers/iio/gyro/st_gyro_buffer.c
+++ b/drivers/iio/gyro/st_gyro_buffer.c
@@ -91,7 +91,7 @@ static const struct iio_buffer_setup_ops st_gyro_buffer_setup_ops = {
91 91
92int st_gyro_allocate_ring(struct iio_dev *indio_dev) 92int st_gyro_allocate_ring(struct iio_dev *indio_dev)
93{ 93{
94 return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time, 94 return iio_triggered_buffer_setup(indio_dev, NULL,
95 &st_sensors_trigger_handler, &st_gyro_buffer_setup_ops); 95 &st_sensors_trigger_handler, &st_gyro_buffer_setup_ops);
96} 96}
97 97
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index 52a3c87c375c..a8012955a1f6 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -409,6 +409,7 @@ static const struct iio_info gyro_info = {
409static const struct iio_trigger_ops st_gyro_trigger_ops = { 409static const struct iio_trigger_ops st_gyro_trigger_ops = {
410 .owner = THIS_MODULE, 410 .owner = THIS_MODULE,
411 .set_trigger_state = ST_GYRO_TRIGGER_SET_STATE, 411 .set_trigger_state = ST_GYRO_TRIGGER_SET_STATE,
412 .validate_device = st_sensors_validate_device,
412}; 413};
413#define ST_GYRO_TRIGGER_OPS (&st_gyro_trigger_ops) 414#define ST_GYRO_TRIGGER_OPS (&st_gyro_trigger_ops)
414#else 415#else
diff --git a/drivers/iio/humidity/am2315.c b/drivers/iio/humidity/am2315.c
index 3be6d209a159..11535911a5c6 100644
--- a/drivers/iio/humidity/am2315.c
+++ b/drivers/iio/humidity/am2315.c
@@ -165,10 +165,8 @@ static irqreturn_t am2315_trigger_handler(int irq, void *p)
165 struct am2315_sensor_data sensor_data; 165 struct am2315_sensor_data sensor_data;
166 166
167 ret = am2315_read_data(data, &sensor_data); 167 ret = am2315_read_data(data, &sensor_data);
168 if (ret < 0) { 168 if (ret < 0)
169 mutex_unlock(&data->lock);
170 goto err; 169 goto err;
171 }
172 170
173 mutex_lock(&data->lock); 171 mutex_lock(&data->lock);
174 if (*(indio_dev->active_scan_mask) == AM2315_ALL_CHANNEL_MASK) { 172 if (*(indio_dev->active_scan_mask) == AM2315_ALL_CHANNEL_MASK) {
diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c
index fa4767613173..a03832a5fc95 100644
--- a/drivers/iio/humidity/hdc100x.c
+++ b/drivers/iio/humidity/hdc100x.c
@@ -55,7 +55,7 @@ static const struct {
55 }, 55 },
56 { /* IIO_HUMIDITYRELATIVE channel */ 56 { /* IIO_HUMIDITYRELATIVE channel */
57 .shift = 8, 57 .shift = 8,
58 .mask = 2, 58 .mask = 3,
59 }, 59 },
60}; 60};
61 61
@@ -164,14 +164,14 @@ static int hdc100x_get_measurement(struct hdc100x_data *data,
164 dev_err(&client->dev, "cannot read high byte measurement"); 164 dev_err(&client->dev, "cannot read high byte measurement");
165 return ret; 165 return ret;
166 } 166 }
167 val = ret << 6; 167 val = ret << 8;
168 168
169 ret = i2c_smbus_read_byte(client); 169 ret = i2c_smbus_read_byte(client);
170 if (ret < 0) { 170 if (ret < 0) {
171 dev_err(&client->dev, "cannot read low byte measurement"); 171 dev_err(&client->dev, "cannot read low byte measurement");
172 return ret; 172 return ret;
173 } 173 }
174 val |= ret >> 2; 174 val |= ret;
175 175
176 return val; 176 return val;
177} 177}
@@ -211,18 +211,18 @@ static int hdc100x_read_raw(struct iio_dev *indio_dev,
211 return IIO_VAL_INT_PLUS_MICRO; 211 return IIO_VAL_INT_PLUS_MICRO;
212 case IIO_CHAN_INFO_SCALE: 212 case IIO_CHAN_INFO_SCALE:
213 if (chan->type == IIO_TEMP) { 213 if (chan->type == IIO_TEMP) {
214 *val = 165; 214 *val = 165000;
215 *val2 = 65536 >> 2; 215 *val2 = 65536;
216 return IIO_VAL_FRACTIONAL; 216 return IIO_VAL_FRACTIONAL;
217 } else { 217 } else {
218 *val = 0; 218 *val = 100;
219 *val2 = 10000; 219 *val2 = 65536;
220 return IIO_VAL_INT_PLUS_MICRO; 220 return IIO_VAL_FRACTIONAL;
221 } 221 }
222 break; 222 break;
223 case IIO_CHAN_INFO_OFFSET: 223 case IIO_CHAN_INFO_OFFSET:
224 *val = -3971; 224 *val = -15887;
225 *val2 = 879096; 225 *val2 = 515151;
226 return IIO_VAL_INT_PLUS_MICRO; 226 return IIO_VAL_INT_PLUS_MICRO;
227 default: 227 default:
228 return -EINVAL; 228 return -EINVAL;
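
The new hdc100x constants follow directly from the datasheet formulas
T = raw * 165 / 65536 - 40 (degrees C) and RH = raw * 100 / 65536 (percent):
the temperature scale becomes 165000/65536 m degC per LSB, so the offset in
raw units is -40 * 65536 / 165 = -15887.515151, exactly the pair in the hunk
above. A small user-space check of that arithmetic (nothing here is driver
API, just the math):

    #include <stdio.h>

    int main(void)
    {
            unsigned int raw = 0x6666;               /* example raw word */
            double scale = 165000.0 / 65536.0;       /* milli-degC per LSB */
            double offset = -40.0 * 65536.0 / 165.0; /* -15887.515151 LSB */

            /* IIO convention: value = (raw + offset) * scale */
            printf("temperature = %.1f mC\n", ((double)raw + offset) * scale);
            return 0;
    }
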
diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c
index 0bf92b06d7d8..b8a290ec984e 100644
--- a/drivers/iio/imu/bmi160/bmi160_core.c
+++ b/drivers/iio/imu/bmi160/bmi160_core.c
@@ -209,11 +209,11 @@ static const struct bmi160_scale_item bmi160_scale_table[] = {
209}; 209};
210 210
211static const struct bmi160_odr bmi160_accel_odr[] = { 211static const struct bmi160_odr bmi160_accel_odr[] = {
212 {0x01, 0, 78125}, 212 {0x01, 0, 781250},
213 {0x02, 1, 5625}, 213 {0x02, 1, 562500},
214 {0x03, 3, 125}, 214 {0x03, 3, 125000},
215 {0x04, 6, 25}, 215 {0x04, 6, 250000},
216 {0x05, 12, 5}, 216 {0x05, 12, 500000},
217 {0x06, 25, 0}, 217 {0x06, 25, 0},
218 {0x07, 50, 0}, 218 {0x07, 50, 0},
219 {0x08, 100, 0}, 219 {0x08, 100, 0},
@@ -229,7 +229,7 @@ static const struct bmi160_odr bmi160_gyro_odr[] = {
229 {0x08, 100, 0}, 229 {0x08, 100, 0},
230 {0x09, 200, 0}, 230 {0x09, 200, 0},
231 {0x0A, 400, 0}, 231 {0x0A, 400, 0},
232 {0x0B, 8000, 0}, 232 {0x0B, 800, 0},
233 {0x0C, 1600, 0}, 233 {0x0C, 1600, 0},
234 {0x0D, 3200, 0}, 234 {0x0D, 3200, 0},
235}; 235};
@@ -364,8 +364,8 @@ int bmi160_set_odr(struct bmi160_data *data, enum bmi160_sensor_type t,
364 364
365 return regmap_update_bits(data->regmap, 365 return regmap_update_bits(data->regmap,
366 bmi160_regs[t].config, 366 bmi160_regs[t].config,
367 bmi160_odr_table[t].tbl[i].bits, 367 bmi160_regs[t].config_odr_mask,
368 bmi160_regs[t].config_odr_mask); 368 bmi160_odr_table[t].tbl[i].bits);
369} 369}
370 370
371static int bmi160_get_odr(struct bmi160_data *data, enum bmi160_sensor_type t, 371static int bmi160_get_odr(struct bmi160_data *data, enum bmi160_sensor_type t,
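
The last bmi160 hunk is an argument-order fix: regmap_update_bits() takes
the bit mask before the new value, so the original call used the value as
the mask and wrote the mask bits instead. A reduced illustration with
made-up register names:

    #include <linux/regmap.h>

    #define MY_CFG_REG      0x40    /* hypothetical configuration register */
    #define MY_ODR_MASK     0x0f    /* hypothetical ODR bit field */

    static int my_set_odr(struct regmap *map, unsigned int odr_bits)
    {
            /* signature: regmap_update_bits(map, reg, mask, val) */
            return regmap_update_bits(map, MY_CFG_REG, MY_ODR_MASK, odr_bits);
    }
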
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index ae2806aafb72..0c52dfe64977 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -210,22 +210,35 @@ static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
210 210
211 /* Prevent the module from being removed whilst attached to a trigger */ 211 /* Prevent the module from being removed whilst attached to a trigger */
212 __module_get(pf->indio_dev->info->driver_module); 212 __module_get(pf->indio_dev->info->driver_module);
213
214 /* Get irq number */
213 pf->irq = iio_trigger_get_irq(trig); 215 pf->irq = iio_trigger_get_irq(trig);
216 if (pf->irq < 0)
217 goto out_put_module;
218
219 /* Request irq */
214 ret = request_threaded_irq(pf->irq, pf->h, pf->thread, 220 ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
215 pf->type, pf->name, 221 pf->type, pf->name,
216 pf); 222 pf);
217 if (ret < 0) { 223 if (ret < 0)
218 module_put(pf->indio_dev->info->driver_module); 224 goto out_put_irq;
219 return ret;
220 }
221 225
226 /* Enable trigger in driver */
222 if (trig->ops && trig->ops->set_trigger_state && notinuse) { 227 if (trig->ops && trig->ops->set_trigger_state && notinuse) {
223 ret = trig->ops->set_trigger_state(trig, true); 228 ret = trig->ops->set_trigger_state(trig, true);
224 if (ret < 0) 229 if (ret < 0)
225 module_put(pf->indio_dev->info->driver_module); 230 goto out_free_irq;
226 } 231 }
227 232
228 return ret; 233 return ret;
234
235out_free_irq:
236 free_irq(pf->irq, pf);
237out_put_irq:
238 iio_trigger_put_irq(trig, pf->irq);
239out_put_module:
240 module_put(pf->indio_dev->info->driver_module);
241 return ret;
229} 242}
230 243
231static int iio_trigger_detach_poll_func(struct iio_trigger *trig, 244static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
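
The attach path above now follows the usual kernel unwind idiom: acquire
each resource in order and, on failure, release everything already taken in
reverse order through stacked labels, so no exit leaks the module reference
or the irq. A generic, self-contained sketch of the shape (every function
here is a hypothetical stand-in):

    static int acquire_module_ref(void) { return 0; }
    static int acquire_irq_number(void) { return 0; }
    static int request_the_irq(void)    { return 0; }
    static void put_irq_number(void)    { }
    static void put_module_ref(void)    { }

    static int attach_example(void)
    {
            int ret;

            ret = acquire_module_ref();
            if (ret)
                    return ret;

            ret = acquire_irq_number();
            if (ret)
                    goto out_put_module;

            ret = request_the_irq();
            if (ret)
                    goto out_put_irq;

            return 0;

    out_put_irq:
            put_irq_number();
    out_put_module:
            put_module_ref();
            return ret;
    }
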
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index b4dbb3912977..651d57b8abbf 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -1011,6 +1011,7 @@ static int apds9960_probe(struct i2c_client *client,
1011 1011
1012 iio_device_attach_buffer(indio_dev, buffer); 1012 iio_device_attach_buffer(indio_dev, buffer);
1013 1013
1014 indio_dev->dev.parent = &client->dev;
1014 indio_dev->info = &apds9960_info; 1015 indio_dev->info = &apds9960_info;
1015 indio_dev->name = APDS9960_DRV_NAME; 1016 indio_dev->name = APDS9960_DRV_NAME;
1016 indio_dev->channels = apds9960_channels; 1017 indio_dev->channels = apds9960_channels;
diff --git a/drivers/iio/light/bh1780.c b/drivers/iio/light/bh1780.c
index 72b364e4aa72..b54dcba05a82 100644
--- a/drivers/iio/light/bh1780.c
+++ b/drivers/iio/light/bh1780.c
@@ -84,7 +84,7 @@ static int bh1780_debugfs_reg_access(struct iio_dev *indio_dev,
84 int ret; 84 int ret;
85 85
86 if (!readval) 86 if (!readval)
87 bh1780_write(bh1780, (u8)reg, (u8)writeval); 87 return bh1780_write(bh1780, (u8)reg, (u8)writeval);
88 88
89 ret = bh1780_read(bh1780, (u8)reg); 89 ret = bh1780_read(bh1780, (u8)reg);
90 if (ret < 0) 90 if (ret < 0)
@@ -187,7 +187,7 @@ static int bh1780_probe(struct i2c_client *client,
187 187
188 indio_dev->dev.parent = &client->dev; 188 indio_dev->dev.parent = &client->dev;
189 indio_dev->info = &bh1780_info; 189 indio_dev->info = &bh1780_info;
190 indio_dev->name = id->name; 190 indio_dev->name = "bh1780";
191 indio_dev->channels = bh1780_channels; 191 indio_dev->channels = bh1780_channels;
192 indio_dev->num_channels = ARRAY_SIZE(bh1780_channels); 192 indio_dev->num_channels = ARRAY_SIZE(bh1780_channels);
193 indio_dev->modes = INDIO_DIRECT_MODE; 193 indio_dev->modes = INDIO_DIRECT_MODE;
@@ -226,7 +226,8 @@ static int bh1780_remove(struct i2c_client *client)
226static int bh1780_runtime_suspend(struct device *dev) 226static int bh1780_runtime_suspend(struct device *dev)
227{ 227{
228 struct i2c_client *client = to_i2c_client(dev); 228 struct i2c_client *client = to_i2c_client(dev);
229 struct bh1780_data *bh1780 = i2c_get_clientdata(client); 229 struct iio_dev *indio_dev = i2c_get_clientdata(client);
230 struct bh1780_data *bh1780 = iio_priv(indio_dev);
230 int ret; 231 int ret;
231 232
232 ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_POFF); 233 ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_POFF);
@@ -241,7 +242,8 @@ static int bh1780_runtime_suspend(struct device *dev)
241static int bh1780_runtime_resume(struct device *dev) 242static int bh1780_runtime_resume(struct device *dev)
242{ 243{
243 struct i2c_client *client = to_i2c_client(dev); 244 struct i2c_client *client = to_i2c_client(dev);
244 struct bh1780_data *bh1780 = i2c_get_clientdata(client); 245 struct iio_dev *indio_dev = i2c_get_clientdata(client);
246 struct bh1780_data *bh1780 = iio_priv(indio_dev);
245 int ret; 247 int ret;
246 248
247 ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_PON); 249 ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_PON);
diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c
index e01e58a9bd14..f17cb2ea18f5 100644
--- a/drivers/iio/light/max44000.c
+++ b/drivers/iio/light/max44000.c
@@ -147,7 +147,6 @@ static const struct iio_chan_spec max44000_channels[] = {
147 { 147 {
148 .type = IIO_PROXIMITY, 148 .type = IIO_PROXIMITY,
149 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), 149 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
150 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
151 .scan_index = MAX44000_SCAN_INDEX_PRX, 150 .scan_index = MAX44000_SCAN_INDEX_PRX,
152 .scan_type = { 151 .scan_type = {
153 .sign = 'u', 152 .sign = 'u',
diff --git a/drivers/iio/magnetometer/st_magn_buffer.c b/drivers/iio/magnetometer/st_magn_buffer.c
index ecd3bd0a9769..0a9e8fadfa9d 100644
--- a/drivers/iio/magnetometer/st_magn_buffer.c
+++ b/drivers/iio/magnetometer/st_magn_buffer.c
@@ -82,7 +82,7 @@ static const struct iio_buffer_setup_ops st_magn_buffer_setup_ops = {
82 82
83int st_magn_allocate_ring(struct iio_dev *indio_dev) 83int st_magn_allocate_ring(struct iio_dev *indio_dev)
84{ 84{
85 return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time, 85 return iio_triggered_buffer_setup(indio_dev, NULL,
86 &st_sensors_trigger_handler, &st_magn_buffer_setup_ops); 86 &st_sensors_trigger_handler, &st_magn_buffer_setup_ops);
87} 87}
88 88
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index 62036d2a9956..8250fc322c56 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -572,6 +572,7 @@ static const struct iio_info magn_info = {
572static const struct iio_trigger_ops st_magn_trigger_ops = { 572static const struct iio_trigger_ops st_magn_trigger_ops = {
573 .owner = THIS_MODULE, 573 .owner = THIS_MODULE,
574 .set_trigger_state = ST_MAGN_TRIGGER_SET_STATE, 574 .set_trigger_state = ST_MAGN_TRIGGER_SET_STATE,
575 .validate_device = st_sensors_validate_device,
575}; 576};
576#define ST_MAGN_TRIGGER_OPS (&st_magn_trigger_ops) 577#define ST_MAGN_TRIGGER_OPS (&st_magn_trigger_ops)
577#else 578#else
diff --git a/drivers/iio/pressure/bmp280.c b/drivers/iio/pressure/bmp280.c
index 2f1498e12bb2..724452d61846 100644
--- a/drivers/iio/pressure/bmp280.c
+++ b/drivers/iio/pressure/bmp280.c
@@ -879,8 +879,8 @@ static int bmp280_probe(struct i2c_client *client,
879 if (ret < 0) 879 if (ret < 0)
880 return ret; 880 return ret;
881 if (chip_id != id->driver_data) { 881 if (chip_id != id->driver_data) {
882 dev_err(&client->dev, "bad chip id. expected %x got %x\n", 882 dev_err(&client->dev, "bad chip id. expected %lx got %x\n",
883 BMP280_CHIP_ID, chip_id); 883 id->driver_data, chip_id);
884 return -EINVAL; 884 return -EINVAL;
885 } 885 }
886 886
diff --git a/drivers/iio/pressure/st_pressure_buffer.c b/drivers/iio/pressure/st_pressure_buffer.c
index 2ff53f222352..99468d0a64e7 100644
--- a/drivers/iio/pressure/st_pressure_buffer.c
+++ b/drivers/iio/pressure/st_pressure_buffer.c
@@ -82,7 +82,7 @@ static const struct iio_buffer_setup_ops st_press_buffer_setup_ops = {
82 82
83int st_press_allocate_ring(struct iio_dev *indio_dev) 83int st_press_allocate_ring(struct iio_dev *indio_dev)
84{ 84{
85 return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time, 85 return iio_triggered_buffer_setup(indio_dev, NULL,
86 &st_sensors_trigger_handler, &st_press_buffer_setup_ops); 86 &st_sensors_trigger_handler, &st_press_buffer_setup_ops);
87} 87}
88 88
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index 9e9b72a8f18f..92a118c3c4ac 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -28,15 +28,21 @@
28#include <linux/iio/common/st_sensors.h> 28#include <linux/iio/common/st_sensors.h>
29#include "st_pressure.h" 29#include "st_pressure.h"
30 30
31#define MCELSIUS_PER_CELSIUS 1000
32
33/* Default pressure sensitivity */
31#define ST_PRESS_LSB_PER_MBAR 4096UL 34#define ST_PRESS_LSB_PER_MBAR 4096UL
32#define ST_PRESS_KPASCAL_NANO_SCALE (100000000UL / \ 35#define ST_PRESS_KPASCAL_NANO_SCALE (100000000UL / \
33 ST_PRESS_LSB_PER_MBAR) 36 ST_PRESS_LSB_PER_MBAR)
37
38/* Default temperature sensitivity */
34#define ST_PRESS_LSB_PER_CELSIUS 480UL 39#define ST_PRESS_LSB_PER_CELSIUS 480UL
35#define ST_PRESS_CELSIUS_NANO_SCALE (1000000000UL / \ 40#define ST_PRESS_MILLI_CELSIUS_OFFSET 42500UL
36 ST_PRESS_LSB_PER_CELSIUS) 41
37#define ST_PRESS_NUMBER_DATA_CHANNELS 1 42#define ST_PRESS_NUMBER_DATA_CHANNELS 1
38 43
39/* FULLSCALE */ 44/* FULLSCALE */
45#define ST_PRESS_FS_AVL_1100MB 1100
40#define ST_PRESS_FS_AVL_1260MB 1260 46#define ST_PRESS_FS_AVL_1260MB 1260
41 47
42#define ST_PRESS_1_OUT_XL_ADDR 0x28 48#define ST_PRESS_1_OUT_XL_ADDR 0x28
@@ -54,9 +60,6 @@
54#define ST_PRESS_LPS331AP_PW_MASK 0x80 60#define ST_PRESS_LPS331AP_PW_MASK 0x80
55#define ST_PRESS_LPS331AP_FS_ADDR 0x23 61#define ST_PRESS_LPS331AP_FS_ADDR 0x23
56#define ST_PRESS_LPS331AP_FS_MASK 0x30 62#define ST_PRESS_LPS331AP_FS_MASK 0x30
57#define ST_PRESS_LPS331AP_FS_AVL_1260_VAL 0x00
58#define ST_PRESS_LPS331AP_FS_AVL_1260_GAIN ST_PRESS_KPASCAL_NANO_SCALE
59#define ST_PRESS_LPS331AP_FS_AVL_TEMP_GAIN ST_PRESS_CELSIUS_NANO_SCALE
60#define ST_PRESS_LPS331AP_BDU_ADDR 0x20 63#define ST_PRESS_LPS331AP_BDU_ADDR 0x20
61#define ST_PRESS_LPS331AP_BDU_MASK 0x04 64#define ST_PRESS_LPS331AP_BDU_MASK 0x04
62#define ST_PRESS_LPS331AP_DRDY_IRQ_ADDR 0x22 65#define ST_PRESS_LPS331AP_DRDY_IRQ_ADDR 0x22
@@ -67,9 +70,14 @@
67#define ST_PRESS_LPS331AP_OD_IRQ_ADDR 0x22 70#define ST_PRESS_LPS331AP_OD_IRQ_ADDR 0x22
68#define ST_PRESS_LPS331AP_OD_IRQ_MASK 0x40 71#define ST_PRESS_LPS331AP_OD_IRQ_MASK 0x40
69#define ST_PRESS_LPS331AP_MULTIREAD_BIT true 72#define ST_PRESS_LPS331AP_MULTIREAD_BIT true
70#define ST_PRESS_LPS331AP_TEMP_OFFSET 42500
71 73
72/* CUSTOM VALUES FOR LPS001WP SENSOR */ 74/* CUSTOM VALUES FOR LPS001WP SENSOR */
75
76/* LPS001WP pressure resolution */
77#define ST_PRESS_LPS001WP_LSB_PER_MBAR 16UL
78/* LPS001WP temperature resolution */
79#define ST_PRESS_LPS001WP_LSB_PER_CELSIUS 64UL
80
73#define ST_PRESS_LPS001WP_WAI_EXP 0xba 81#define ST_PRESS_LPS001WP_WAI_EXP 0xba
74#define ST_PRESS_LPS001WP_ODR_ADDR 0x20 82#define ST_PRESS_LPS001WP_ODR_ADDR 0x20
75#define ST_PRESS_LPS001WP_ODR_MASK 0x30 83#define ST_PRESS_LPS001WP_ODR_MASK 0x30
@@ -78,6 +86,8 @@
78#define ST_PRESS_LPS001WP_ODR_AVL_13HZ_VAL 0x03 86#define ST_PRESS_LPS001WP_ODR_AVL_13HZ_VAL 0x03
79#define ST_PRESS_LPS001WP_PW_ADDR 0x20 87#define ST_PRESS_LPS001WP_PW_ADDR 0x20
80#define ST_PRESS_LPS001WP_PW_MASK 0x40 88#define ST_PRESS_LPS001WP_PW_MASK 0x40
89#define ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN \
90 (100000000UL / ST_PRESS_LPS001WP_LSB_PER_MBAR)
81#define ST_PRESS_LPS001WP_BDU_ADDR 0x20 91#define ST_PRESS_LPS001WP_BDU_ADDR 0x20
82#define ST_PRESS_LPS001WP_BDU_MASK 0x04 92#define ST_PRESS_LPS001WP_BDU_MASK 0x04
83#define ST_PRESS_LPS001WP_MULTIREAD_BIT true 93#define ST_PRESS_LPS001WP_MULTIREAD_BIT true
@@ -94,11 +104,6 @@
94#define ST_PRESS_LPS25H_ODR_AVL_25HZ_VAL 0x04 104#define ST_PRESS_LPS25H_ODR_AVL_25HZ_VAL 0x04
95#define ST_PRESS_LPS25H_PW_ADDR 0x20 105#define ST_PRESS_LPS25H_PW_ADDR 0x20
96#define ST_PRESS_LPS25H_PW_MASK 0x80 106#define ST_PRESS_LPS25H_PW_MASK 0x80
97#define ST_PRESS_LPS25H_FS_ADDR 0x00
98#define ST_PRESS_LPS25H_FS_MASK 0x00
99#define ST_PRESS_LPS25H_FS_AVL_1260_VAL 0x00
100#define ST_PRESS_LPS25H_FS_AVL_1260_GAIN ST_PRESS_KPASCAL_NANO_SCALE
101#define ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN ST_PRESS_CELSIUS_NANO_SCALE
102#define ST_PRESS_LPS25H_BDU_ADDR 0x20 107#define ST_PRESS_LPS25H_BDU_ADDR 0x20
103#define ST_PRESS_LPS25H_BDU_MASK 0x04 108#define ST_PRESS_LPS25H_BDU_MASK 0x04
104#define ST_PRESS_LPS25H_DRDY_IRQ_ADDR 0x23 109#define ST_PRESS_LPS25H_DRDY_IRQ_ADDR 0x23
@@ -109,7 +114,6 @@
109#define ST_PRESS_LPS25H_OD_IRQ_ADDR 0x22 114#define ST_PRESS_LPS25H_OD_IRQ_ADDR 0x22
110#define ST_PRESS_LPS25H_OD_IRQ_MASK 0x40 115#define ST_PRESS_LPS25H_OD_IRQ_MASK 0x40
111#define ST_PRESS_LPS25H_MULTIREAD_BIT true 116#define ST_PRESS_LPS25H_MULTIREAD_BIT true
112#define ST_PRESS_LPS25H_TEMP_OFFSET 42500
113#define ST_PRESS_LPS25H_OUT_XL_ADDR 0x28 117#define ST_PRESS_LPS25H_OUT_XL_ADDR 0x28
114#define ST_TEMP_LPS25H_OUT_L_ADDR 0x2b 118#define ST_TEMP_LPS25H_OUT_L_ADDR 0x2b
115 119
@@ -161,7 +165,9 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = {
161 .storagebits = 16, 165 .storagebits = 16,
162 .endianness = IIO_LE, 166 .endianness = IIO_LE,
163 }, 167 },
164 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), 168 .info_mask_separate =
169 BIT(IIO_CHAN_INFO_RAW) |
170 BIT(IIO_CHAN_INFO_SCALE),
165 .modified = 0, 171 .modified = 0,
166 }, 172 },
167 { 173 {
@@ -177,7 +183,7 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = {
177 }, 183 },
178 .info_mask_separate = 184 .info_mask_separate =
179 BIT(IIO_CHAN_INFO_RAW) | 185 BIT(IIO_CHAN_INFO_RAW) |
180 BIT(IIO_CHAN_INFO_OFFSET), 186 BIT(IIO_CHAN_INFO_SCALE),
181 .modified = 0, 187 .modified = 0,
182 }, 188 },
183 IIO_CHAN_SOFT_TIMESTAMP(1) 189 IIO_CHAN_SOFT_TIMESTAMP(1)
@@ -212,11 +218,14 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
212 .addr = ST_PRESS_LPS331AP_FS_ADDR, 218 .addr = ST_PRESS_LPS331AP_FS_ADDR,
213 .mask = ST_PRESS_LPS331AP_FS_MASK, 219 .mask = ST_PRESS_LPS331AP_FS_MASK,
214 .fs_avl = { 220 .fs_avl = {
221 /*
222 * Pressure and temperature sensitivity values
223 * as defined in table 3 of LPS331AP datasheet.
224 */
215 [0] = { 225 [0] = {
216 .num = ST_PRESS_FS_AVL_1260MB, 226 .num = ST_PRESS_FS_AVL_1260MB,
217 .value = ST_PRESS_LPS331AP_FS_AVL_1260_VAL, 227 .gain = ST_PRESS_KPASCAL_NANO_SCALE,
218 .gain = ST_PRESS_LPS331AP_FS_AVL_1260_GAIN, 228 .gain2 = ST_PRESS_LSB_PER_CELSIUS,
219 .gain2 = ST_PRESS_LPS331AP_FS_AVL_TEMP_GAIN,
220 }, 229 },
221 }, 230 },
222 }, 231 },
@@ -261,7 +270,17 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
261 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE, 270 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
262 }, 271 },
263 .fs = { 272 .fs = {
264 .addr = 0, 273 .fs_avl = {
274 /*
275 * Pressure and temperature resolution values
276 * as defined in table 3 of LPS001WP datasheet.
277 */
278 [0] = {
279 .num = ST_PRESS_FS_AVL_1100MB,
280 .gain = ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN,
281 .gain2 = ST_PRESS_LPS001WP_LSB_PER_CELSIUS,
282 },
283 },
265 }, 284 },
266 .bdu = { 285 .bdu = {
267 .addr = ST_PRESS_LPS001WP_BDU_ADDR, 286 .addr = ST_PRESS_LPS001WP_BDU_ADDR,
@@ -298,14 +317,15 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
298 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE, 317 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
299 }, 318 },
300 .fs = { 319 .fs = {
301 .addr = ST_PRESS_LPS25H_FS_ADDR,
302 .mask = ST_PRESS_LPS25H_FS_MASK,
303 .fs_avl = { 320 .fs_avl = {
321 /*
322 * Pressure and temperature sensitivity values
323 * as defined in table 3 of LPS25H datasheet.
324 */
304 [0] = { 325 [0] = {
305 .num = ST_PRESS_FS_AVL_1260MB, 326 .num = ST_PRESS_FS_AVL_1260MB,
306 .value = ST_PRESS_LPS25H_FS_AVL_1260_VAL, 327 .gain = ST_PRESS_KPASCAL_NANO_SCALE,
307 .gain = ST_PRESS_LPS25H_FS_AVL_1260_GAIN, 328 .gain2 = ST_PRESS_LSB_PER_CELSIUS,
308 .gain2 = ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN,
309 }, 329 },
310 }, 330 },
311 }, 331 },
@@ -364,26 +384,26 @@ static int st_press_read_raw(struct iio_dev *indio_dev,
364 384
365 return IIO_VAL_INT; 385 return IIO_VAL_INT;
366 case IIO_CHAN_INFO_SCALE: 386 case IIO_CHAN_INFO_SCALE:
367 *val = 0;
368
369 switch (ch->type) { 387 switch (ch->type) {
370 case IIO_PRESSURE: 388 case IIO_PRESSURE:
389 *val = 0;
371 *val2 = press_data->current_fullscale->gain; 390 *val2 = press_data->current_fullscale->gain;
372 break; 391 return IIO_VAL_INT_PLUS_NANO;
373 case IIO_TEMP: 392 case IIO_TEMP:
393 *val = MCELSIUS_PER_CELSIUS;
374 *val2 = press_data->current_fullscale->gain2; 394 *val2 = press_data->current_fullscale->gain2;
375 break; 395 return IIO_VAL_FRACTIONAL;
376 default: 396 default:
377 err = -EINVAL; 397 err = -EINVAL;
378 goto read_error; 398 goto read_error;
379 } 399 }
380 400
381 return IIO_VAL_INT_PLUS_NANO;
382 case IIO_CHAN_INFO_OFFSET: 401 case IIO_CHAN_INFO_OFFSET:
383 switch (ch->type) { 402 switch (ch->type) {
384 case IIO_TEMP: 403 case IIO_TEMP:
385 *val = 425; 404 *val = ST_PRESS_MILLI_CELSIUS_OFFSET *
386 *val2 = 10; 405 press_data->current_fullscale->gain2;
406 *val2 = MCELSIUS_PER_CELSIUS;
387 break; 407 break;
388 default: 408 default:
389 err = -EINVAL; 409 err = -EINVAL;
@@ -425,6 +445,7 @@ static const struct iio_info press_info = {
425static const struct iio_trigger_ops st_press_trigger_ops = { 445static const struct iio_trigger_ops st_press_trigger_ops = {
426 .owner = THIS_MODULE, 446 .owner = THIS_MODULE,
427 .set_trigger_state = ST_PRESS_TRIGGER_SET_STATE, 447 .set_trigger_state = ST_PRESS_TRIGGER_SET_STATE,
448 .validate_device = st_sensors_validate_device,
428}; 449};
429#define ST_PRESS_TRIGGER_OPS (&st_press_trigger_ops) 450#define ST_PRESS_TRIGGER_OPS (&st_press_trigger_ops)
430#else 451#else
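
After this rework the temperature channel reports
scale = MCELSIUS_PER_CELSIUS / gain2 (m degC per LSB) and
offset = 42500 * gain2 / 1000 raw units, so (raw + offset) * scale yields
the datasheet's 42.5 degC at a raw reading of 0. A worked user-space check
for an LPS25H-class part (gain2 = 480); only the arithmetic is asserted
here:

    #include <stdio.h>

    int main(void)
    {
            long raw = -4800;                         /* example sample */
            double scale = 1000.0 / 480.0;            /* mdegC per LSB */
            double offset = 42500.0 * 480.0 / 1000.0; /* 20400 raw units */

            /* -4800 LSB is 10 degC below the 42.5 degC zero point */
            printf("temperature = %.1f mC\n", ((double)raw + offset) * scale);
            return 0;
    }
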
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index f4d29d5dbd5f..e2f926cdcad2 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -64,6 +64,7 @@ struct as3935_state {
64 struct delayed_work work; 64 struct delayed_work work;
65 65
66 u32 tune_cap; 66 u32 tune_cap;
67 u8 buffer[16]; /* 8-bit data + 56-bit padding + 64-bit timestamp */
67 u8 buf[2] ____cacheline_aligned; 68 u8 buf[2] ____cacheline_aligned;
68}; 69};
69 70
@@ -72,7 +73,8 @@ static const struct iio_chan_spec as3935_channels[] = {
72 .type = IIO_PROXIMITY, 73 .type = IIO_PROXIMITY,
73 .info_mask_separate = 74 .info_mask_separate =
74 BIT(IIO_CHAN_INFO_RAW) | 75 BIT(IIO_CHAN_INFO_RAW) |
75 BIT(IIO_CHAN_INFO_PROCESSED), 76 BIT(IIO_CHAN_INFO_PROCESSED) |
77 BIT(IIO_CHAN_INFO_SCALE),
76 .scan_index = 0, 78 .scan_index = 0,
77 .scan_type = { 79 .scan_type = {
78 .sign = 'u', 80 .sign = 'u',
@@ -181,7 +183,12 @@ static int as3935_read_raw(struct iio_dev *indio_dev,
181 /* storm out of range */ 183 /* storm out of range */
182 if (*val == AS3935_DATA_MASK) 184 if (*val == AS3935_DATA_MASK)
183 return -EINVAL; 185 return -EINVAL;
184 *val *= 1000; 186
187 if (m == IIO_CHAN_INFO_PROCESSED)
188 *val *= 1000;
189 break;
190 case IIO_CHAN_INFO_SCALE:
191 *val = 1000;
185 break; 192 break;
186 default: 193 default:
187 return -EINVAL; 194 return -EINVAL;
@@ -206,10 +213,10 @@ static irqreturn_t as3935_trigger_handler(int irq, void *private)
206 ret = as3935_read(st, AS3935_DATA, &val); 213 ret = as3935_read(st, AS3935_DATA, &val);
207 if (ret) 214 if (ret)
208 goto err_read; 215 goto err_read;
209 val &= AS3935_DATA_MASK;
210 val *= 1000;
211 216
212 iio_push_to_buffers_with_timestamp(indio_dev, &val, pf->timestamp); 217 st->buffer[0] = val & AS3935_DATA_MASK;
218 iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer,
219 pf->timestamp);
213err_read: 220err_read:
214 iio_trigger_notify_done(indio_dev->trig); 221 iio_trigger_notify_done(indio_dev->trig);
215 222
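
The as3935 fix stops pushing a bare int into the buffer:
iio_push_to_buffers_with_timestamp() stores the 64-bit timestamp at an
8-byte-aligned offset past the scan data, so the driver must hand it storage
covering sample, padding and timestamp. A minimal sketch of the pattern for
a single 8-bit channel (the my_* names are hypothetical):

    #include <linux/interrupt.h>
    #include <linux/iio/iio.h>
    #include <linux/iio/trigger_consumer.h>

    struct my_state {
            u8 buffer[16];  /* 1 byte data + 7 bytes pad + s64 timestamp */
    };

    static u8 my_read_sample(void)
    {
            return 0;       /* hypothetical register read */
    }

    static irqreturn_t my_trigger_handler(int irq, void *private)
    {
            struct iio_poll_func *pf = private;
            struct iio_dev *indio_dev = pf->indio_dev;
            struct my_state *st = iio_priv(indio_dev);

            st->buffer[0] = my_read_sample();
            iio_push_to_buffers_with_timestamp(indio_dev, st->buffer,
                                               pf->timestamp);
            iio_trigger_notify_done(indio_dev->trig);
            return IRQ_HANDLED;
    }
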
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index c2e257d97eff..1a2984c28b95 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -178,6 +178,7 @@ static int write_gid(struct ib_device *ib_dev, u8 port,
178{ 178{
179 int ret = 0; 179 int ret = 0;
180 struct net_device *old_net_dev; 180 struct net_device *old_net_dev;
181 enum ib_gid_type old_gid_type;
181 182
182 /* in rdma_cap_roce_gid_table, this function should be protected by a 183 /* in rdma_cap_roce_gid_table, this function should be protected by a
183 * sleep-able lock. 184 * sleep-able lock.
@@ -199,6 +200,7 @@ static int write_gid(struct ib_device *ib_dev, u8 port,
199 } 200 }
200 201
201 old_net_dev = table->data_vec[ix].attr.ndev; 202 old_net_dev = table->data_vec[ix].attr.ndev;
203 old_gid_type = table->data_vec[ix].attr.gid_type;
202 if (old_net_dev && old_net_dev != attr->ndev) 204 if (old_net_dev && old_net_dev != attr->ndev)
203 dev_put(old_net_dev); 205 dev_put(old_net_dev);
204 /* if modify_gid failed, just delete the old gid */ 206 /* if modify_gid failed, just delete the old gid */
@@ -207,10 +209,14 @@ static int write_gid(struct ib_device *ib_dev, u8 port,
207 attr = &zattr; 209 attr = &zattr;
208 table->data_vec[ix].context = NULL; 210 table->data_vec[ix].context = NULL;
209 } 211 }
210 if (default_gid) 212
211 table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
212 memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid)); 213 memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
213 memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr)); 214 memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
215 if (default_gid) {
216 table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
217 if (action == GID_TABLE_WRITE_ACTION_DEL)
218 table->data_vec[ix].attr.gid_type = old_gid_type;
219 }
214 if (table->data_vec[ix].attr.ndev && 220 if (table->data_vec[ix].attr.ndev &&
215 table->data_vec[ix].attr.ndev != old_net_dev) 221 table->data_vec[ix].attr.ndev != old_net_dev)
216 dev_hold(table->data_vec[ix].attr.ndev); 222 dev_hold(table->data_vec[ix].attr.ndev);
@@ -405,7 +411,9 @@ int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
405 411
406 for (ix = 0; ix < table->sz; ix++) 412 for (ix = 0; ix < table->sz; ix++)
407 if (table->data_vec[ix].attr.ndev == ndev) 413 if (table->data_vec[ix].attr.ndev == ndev)
408 if (!del_gid(ib_dev, port, table, ix, false)) 414 if (!del_gid(ib_dev, port, table, ix,
415 !!(table->data_vec[ix].props &
416 GID_TABLE_ENTRY_DEFAULT)))
409 deleted = true; 417 deleted = true;
410 418
411 write_unlock_irq(&table->rwlock); 419 write_unlock_irq(&table->rwlock);
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 1d92e091e22e..c99525512b34 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3452,14 +3452,14 @@ static int cm_establish(struct ib_cm_id *cm_id)
3452 work->cm_event.event = IB_CM_USER_ESTABLISHED; 3452 work->cm_event.event = IB_CM_USER_ESTABLISHED;
3453 3453
3454 /* Check if the device started its remove_one */ 3454 /* Check if the device started its remove_one */
3455 spin_lock_irq(&cm.lock); 3455 spin_lock_irqsave(&cm.lock, flags);
3456 if (!cm_dev->going_down) { 3456 if (!cm_dev->going_down) {
3457 queue_delayed_work(cm.wq, &work->work, 0); 3457 queue_delayed_work(cm.wq, &work->work, 0);
3458 } else { 3458 } else {
3459 kfree(work); 3459 kfree(work);
3460 ret = -ENODEV; 3460 ret = -ENODEV;
3461 } 3461 }
3462 spin_unlock_irq(&cm.lock); 3462 spin_unlock_irqrestore(&cm.lock, flags);
3463 3463
3464out: 3464out:
3465 return ret; 3465 return ret;
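
The usual motivation for this conversion is that cm_establish() can be
reached from callers that may already run with interrupts disabled:
spin_unlock_irq() would re-enable them unconditionally, while the
irqsave/irqrestore pair preserves whatever state the caller had. The idiom
in isolation (flags is a local in the enclosing function):

    unsigned long flags;

    spin_lock_irqsave(&cm.lock, flags);      /* save IRQ state, disable */
    /* ... critical section ... */
    spin_unlock_irqrestore(&cm.lock, flags); /* restore saved IRQ state */
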
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index f0c91ba3178a..ad1b1adcf6f0 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -708,17 +708,6 @@ static void cma_deref_id(struct rdma_id_private *id_priv)
708 complete(&id_priv->comp); 708 complete(&id_priv->comp);
709} 709}
710 710
711static int cma_disable_callback(struct rdma_id_private *id_priv,
712 enum rdma_cm_state state)
713{
714 mutex_lock(&id_priv->handler_mutex);
715 if (id_priv->state != state) {
716 mutex_unlock(&id_priv->handler_mutex);
717 return -EINVAL;
718 }
719 return 0;
720}
721
722struct rdma_cm_id *rdma_create_id(struct net *net, 711struct rdma_cm_id *rdma_create_id(struct net *net,
723 rdma_cm_event_handler event_handler, 712 rdma_cm_event_handler event_handler,
724 void *context, enum rdma_port_space ps, 713 void *context, enum rdma_port_space ps,
@@ -1671,11 +1660,12 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
1671 struct rdma_cm_event event; 1660 struct rdma_cm_event event;
1672 int ret = 0; 1661 int ret = 0;
1673 1662
1663 mutex_lock(&id_priv->handler_mutex);
1674 if ((ib_event->event != IB_CM_TIMEWAIT_EXIT && 1664 if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
1675 cma_disable_callback(id_priv, RDMA_CM_CONNECT)) || 1665 id_priv->state != RDMA_CM_CONNECT) ||
1676 (ib_event->event == IB_CM_TIMEWAIT_EXIT && 1666 (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
1677 cma_disable_callback(id_priv, RDMA_CM_DISCONNECT))) 1667 id_priv->state != RDMA_CM_DISCONNECT))
1678 return 0; 1668 goto out;
1679 1669
1680 memset(&event, 0, sizeof event); 1670 memset(&event, 0, sizeof event);
1681 switch (ib_event->event) { 1671 switch (ib_event->event) {
@@ -1870,7 +1860,7 @@ static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_e
1870 1860
1871static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) 1861static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
1872{ 1862{
1873 struct rdma_id_private *listen_id, *conn_id; 1863 struct rdma_id_private *listen_id, *conn_id = NULL;
1874 struct rdma_cm_event event; 1864 struct rdma_cm_event event;
1875 struct net_device *net_dev; 1865 struct net_device *net_dev;
1876 int offset, ret; 1866 int offset, ret;
@@ -1884,9 +1874,10 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
1884 goto net_dev_put; 1874 goto net_dev_put;
1885 } 1875 }
1886 1876
1887 if (cma_disable_callback(listen_id, RDMA_CM_LISTEN)) { 1877 mutex_lock(&listen_id->handler_mutex);
1878 if (listen_id->state != RDMA_CM_LISTEN) {
1888 ret = -ECONNABORTED; 1879 ret = -ECONNABORTED;
1889 goto net_dev_put; 1880 goto err1;
1890 } 1881 }
1891 1882
1892 memset(&event, 0, sizeof event); 1883 memset(&event, 0, sizeof event);
@@ -1976,8 +1967,9 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
1976 struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; 1967 struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
1977 struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; 1968 struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
1978 1969
1979 if (cma_disable_callback(id_priv, RDMA_CM_CONNECT)) 1970 mutex_lock(&id_priv->handler_mutex);
1980 return 0; 1971 if (id_priv->state != RDMA_CM_CONNECT)
1972 goto out;
1981 1973
1982 memset(&event, 0, sizeof event); 1974 memset(&event, 0, sizeof event);
1983 switch (iw_event->event) { 1975 switch (iw_event->event) {
@@ -2029,6 +2021,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
2029 return ret; 2021 return ret;
2030 } 2022 }
2031 2023
2024out:
2032 mutex_unlock(&id_priv->handler_mutex); 2025 mutex_unlock(&id_priv->handler_mutex);
2033 return ret; 2026 return ret;
2034} 2027}
@@ -2039,13 +2032,15 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
2039 struct rdma_cm_id *new_cm_id; 2032 struct rdma_cm_id *new_cm_id;
2040 struct rdma_id_private *listen_id, *conn_id; 2033 struct rdma_id_private *listen_id, *conn_id;
2041 struct rdma_cm_event event; 2034 struct rdma_cm_event event;
2042 int ret; 2035 int ret = -ECONNABORTED;
2043 struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; 2036 struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
2044 struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; 2037 struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
2045 2038
2046 listen_id = cm_id->context; 2039 listen_id = cm_id->context;
2047 if (cma_disable_callback(listen_id, RDMA_CM_LISTEN)) 2040
2048 return -ECONNABORTED; 2041 mutex_lock(&listen_id->handler_mutex);
2042 if (listen_id->state != RDMA_CM_LISTEN)
2043 goto out;
2049 2044
2050 /* Create a new RDMA id for the new IW CM ID */ 2045 /* Create a new RDMA id for the new IW CM ID */
2051 new_cm_id = rdma_create_id(listen_id->id.route.addr.dev_addr.net, 2046 new_cm_id = rdma_create_id(listen_id->id.route.addr.dev_addr.net,
@@ -3216,8 +3211,9 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
3216 struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd; 3211 struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
3217 int ret = 0; 3212 int ret = 0;
3218 3213
3219 if (cma_disable_callback(id_priv, RDMA_CM_CONNECT)) 3214 mutex_lock(&id_priv->handler_mutex);
3220 return 0; 3215 if (id_priv->state != RDMA_CM_CONNECT)
3216 goto out;
3221 3217
3222 memset(&event, 0, sizeof event); 3218 memset(&event, 0, sizeof event);
3223 switch (ib_event->event) { 3219 switch (ib_event->event) {
@@ -3673,12 +3669,13 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
3673 struct rdma_id_private *id_priv; 3669 struct rdma_id_private *id_priv;
3674 struct cma_multicast *mc = multicast->context; 3670 struct cma_multicast *mc = multicast->context;
3675 struct rdma_cm_event event; 3671 struct rdma_cm_event event;
3676 int ret; 3672 int ret = 0;
3677 3673
3678 id_priv = mc->id_priv; 3674 id_priv = mc->id_priv;
3679 if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) && 3675 mutex_lock(&id_priv->handler_mutex);
3680 cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED)) 3676 if (id_priv->state != RDMA_CM_ADDR_BOUND &&
3681 return 0; 3677 id_priv->state != RDMA_CM_ADDR_RESOLVED)
3678 goto out;
3682 3679
3683 if (!status) 3680 if (!status)
3684 status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey)); 3681 status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
@@ -3720,6 +3717,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
3720 return 0; 3717 return 0;
3721 } 3718 }
3722 3719
3720out:
3723 mutex_unlock(&id_priv->handler_mutex); 3721 mutex_unlock(&id_priv->handler_mutex);
3724 return 0; 3722 return 0;
3725} 3723}
@@ -3878,12 +3876,12 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
3878 gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num - 3876 gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
3879 rdma_start_port(id_priv->cma_dev->device)]; 3877 rdma_start_port(id_priv->cma_dev->device)];
3880 if (addr->sa_family == AF_INET) { 3878 if (addr->sa_family == AF_INET) {
3881 if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) 3879 if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
3880 mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
3882 err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid, 3881 err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
3883 true); 3882 true);
3884 if (!err) { 3883 if (!err)
3885 mc->igmp_joined = true; 3884 mc->igmp_joined = true;
3886 mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
3887 } 3885 }
3888 } else { 3886 } else {
3889 if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) 3887 if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
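
The cma.c rework retires cma_disable_callback(), which returned with
handler_mutex held only on success, in favor of one uniform shape: take the
mutex unconditionally, check the connection state, and route every exit
through a single unlock. The skeleton all of the handlers above now share
(a sketch, using only names from the hunks):

    mutex_lock(&id_priv->handler_mutex);
    if (id_priv->state != RDMA_CM_CONNECT)
            goto out;       /* stale event: drop it, but still unlock */

    /* ... deliver the event ... */

    out:
            mutex_unlock(&id_priv->handler_mutex);
            return ret;
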
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 5516fb070344..5c155fa91eec 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -661,6 +661,9 @@ int ib_query_port(struct ib_device *device,
661 if (err || port_attr->subnet_prefix) 661 if (err || port_attr->subnet_prefix)
662 return err; 662 return err;
663 663
664 if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND)
665 return 0;
666
664 err = ib_query_gid(device, port_num, 0, &gid, NULL); 667 err = ib_query_gid(device, port_num, 0, &gid, NULL);
665 if (err) 668 if (err)
666 return err; 669 return err;
@@ -1024,7 +1027,8 @@ static int __init ib_core_init(void)
1024 goto err_mad; 1027 goto err_mad;
1025 } 1028 }
1026 1029
1027 if (ib_add_ibnl_clients()) { 1030 ret = ib_add_ibnl_clients();
1031 if (ret) {
1028 pr_warn("Couldn't register ibnl clients\n"); 1032 pr_warn("Couldn't register ibnl clients\n");
1029 goto err_sa; 1033 goto err_sa;
1030 } 1034 }
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
index 43e3fa27102b..1c41b95cefec 100644
--- a/drivers/infiniband/core/iwpm_msg.c
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -506,7 +506,7 @@ int iwpm_add_and_query_mapping_cb(struct sk_buff *skb,
506 if (!nlmsg_request) { 506 if (!nlmsg_request) {
507 pr_info("%s: Could not find a matching request (seq = %u)\n", 507 pr_info("%s: Could not find a matching request (seq = %u)\n",
508 __func__, msg_seq); 508 __func__, msg_seq);
509 return -EINVAL; 509 return -EINVAL;
510 } 510 }
511 pm_msg = nlmsg_request->req_buffer; 511 pm_msg = nlmsg_request->req_buffer;
512 local_sockaddr = (struct sockaddr_storage *) 512 local_sockaddr = (struct sockaddr_storage *)
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 82fb511112da..2d49228f28b2 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1638,9 +1638,9 @@ static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1638 /* Now, check to see if there are any methods still in use */ 1638 /* Now, check to see if there are any methods still in use */
1639 if (!check_method_table(method)) { 1639 if (!check_method_table(method)) {
1640 /* If not, release management method table */ 1640 /* If not, release management method table */
1641 kfree(method); 1641 kfree(method);
1642 class->method_table[mgmt_class] = NULL; 1642 class->method_table[mgmt_class] = NULL;
1643 /* Any management classes left ? */ 1643 /* Any management classes left ? */
1644 if (!check_class_table(class)) { 1644 if (!check_class_table(class)) {
1645 /* If not, release management class table */ 1645 /* If not, release management class table */
1646 kfree(class); 1646 kfree(class);
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 5e573bb18660..a5793c8f1590 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -889,9 +889,9 @@ static struct attribute *alloc_hsa_lifespan(char *name, u8 port_num)
889static void setup_hw_stats(struct ib_device *device, struct ib_port *port, 889static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
890 u8 port_num) 890 u8 port_num)
891{ 891{
892 struct attribute_group *hsag = NULL; 892 struct attribute_group *hsag;
893 struct rdma_hw_stats *stats; 893 struct rdma_hw_stats *stats;
894 int i = 0, ret; 894 int i, ret;
895 895
896 stats = device->alloc_hw_stats(device, port_num); 896 stats = device->alloc_hw_stats(device, port_num);
897 897
@@ -899,19 +899,22 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
899 return; 899 return;
900 900
901 if (!stats->names || stats->num_counters <= 0) 901 if (!stats->names || stats->num_counters <= 0)
902 goto err; 902 goto err_free_stats;
903 903
904 /*
905 * Two extra attribute elements here, one for the lifespan entry and
906 * one to NULL terminate the list for the sysfs core code
907 */
904 hsag = kzalloc(sizeof(*hsag) + 908 hsag = kzalloc(sizeof(*hsag) +
905 // 1 extra for the lifespan config entry 909 sizeof(void *) * (stats->num_counters + 2),
906 sizeof(void *) * (stats->num_counters + 1),
907 GFP_KERNEL); 910 GFP_KERNEL);
908 if (!hsag) 911 if (!hsag)
909 return; 912 goto err_free_stats;
910 913
911 ret = device->get_hw_stats(device, stats, port_num, 914 ret = device->get_hw_stats(device, stats, port_num,
912 stats->num_counters); 915 stats->num_counters);
913 if (ret != stats->num_counters) 916 if (ret != stats->num_counters)
914 goto err; 917 goto err_free_hsag;
915 918
916 stats->timestamp = jiffies; 919 stats->timestamp = jiffies;
917 920
@@ -922,10 +925,13 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
922 hsag->attrs[i] = alloc_hsa(i, port_num, stats->names[i]); 925 hsag->attrs[i] = alloc_hsa(i, port_num, stats->names[i]);
923 if (!hsag->attrs[i]) 926 if (!hsag->attrs[i])
924 goto err; 927 goto err;
928 sysfs_attr_init(hsag->attrs[i]);
925 } 929 }
926 930
927 /* treat an error here as non-fatal */ 931 /* treat an error here as non-fatal */
928 hsag->attrs[i] = alloc_hsa_lifespan("lifespan", port_num); 932 hsag->attrs[i] = alloc_hsa_lifespan("lifespan", port_num);
933 if (hsag->attrs[i])
934 sysfs_attr_init(hsag->attrs[i]);
929 935
930 if (port) { 936 if (port) {
931 struct kobject *kobj = &port->kobj; 937 struct kobject *kobj = &port->kobj;
@@ -946,10 +952,12 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
946 return; 952 return;
947 953
948err: 954err:
949 kfree(stats);
950 for (; i >= 0; i--) 955 for (; i >= 0; i--)
951 kfree(hsag->attrs[i]); 956 kfree(hsag->attrs[i]);
957err_free_hsag:
952 kfree(hsag); 958 kfree(hsag);
959err_free_stats:
960 kfree(stats);
953 return; 961 return;
954} 962}
955 963
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 1a8babb8ee3c..825021d1008b 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1747,7 +1747,7 @@ static int create_qp(struct ib_uverbs_file *file,
1747 struct ib_srq *srq = NULL; 1747 struct ib_srq *srq = NULL;
1748 struct ib_qp *qp; 1748 struct ib_qp *qp;
1749 char *buf; 1749 char *buf;
1750 struct ib_qp_init_attr attr; 1750 struct ib_qp_init_attr attr = {};
1751 struct ib_uverbs_ex_create_qp_resp resp; 1751 struct ib_uverbs_ex_create_qp_resp resp;
1752 int ret; 1752 int ret;
1753 1753
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 1d7d4cf442e3..6298f54b4137 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -511,12 +511,16 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
511 ah_attr->grh.dgid = sgid; 511 ah_attr->grh.dgid = sgid;
512 512
513 if (!rdma_cap_eth_ah(device, port_num)) { 513 if (!rdma_cap_eth_ah(device, port_num)) {
514 ret = ib_find_cached_gid_by_port(device, &dgid, 514 if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
515 IB_GID_TYPE_IB, 515 ret = ib_find_cached_gid_by_port(device, &dgid,
516 port_num, NULL, 516 IB_GID_TYPE_IB,
517 &gid_index); 517 port_num, NULL,
518 if (ret) 518 &gid_index);
519 return ret; 519 if (ret)
520 return ret;
521 } else {
522 gid_index = 0;
523 }
520 } 524 }
521 525
522 ah_attr->grh.sgid_index = (u8) gid_index; 526 ah_attr->grh.sgid_index = (u8) gid_index;
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index 6e7050ab9e16..14d7eeb09be6 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -300,16 +300,15 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
300 const struct cpumask *node_mask, 300 const struct cpumask *node_mask,
301 *proc_mask = tsk_cpus_allowed(current); 301 *proc_mask = tsk_cpus_allowed(current);
302 struct cpu_mask_set *set = &dd->affinity->proc; 302 struct cpu_mask_set *set = &dd->affinity->proc;
303 char buf[1024];
304 303
305 /* 304 /*
306 * check whether process/context affinity has already 305 * check whether process/context affinity has already
307 * been set 306 * been set
308 */ 307 */
309 if (cpumask_weight(proc_mask) == 1) { 308 if (cpumask_weight(proc_mask) == 1) {
310 scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask)); 309 hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
311 hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %s", 310 current->pid, current->comm,
312 current->pid, current->comm, buf); 311 cpumask_pr_args(proc_mask));
313 /* 312 /*
314 * Mark the pre-set CPU as used. This is atomic so we don't 313 * Mark the pre-set CPU as used. This is atomic so we don't
315 * need the lock 314 * need the lock
@@ -318,9 +317,9 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
318 cpumask_set_cpu(cpu, &set->used); 317 cpumask_set_cpu(cpu, &set->used);
319 goto done; 318 goto done;
320 } else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) { 319 } else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) {
321 scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask)); 320 hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl",
322 hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %s", 321 current->pid, current->comm,
323 current->pid, current->comm, buf); 322 cpumask_pr_args(proc_mask));
324 goto done; 323 goto done;
325 } 324 }
326 325
@@ -356,8 +355,8 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
356 cpumask_or(intrs, intrs, (dd->affinity->rcv_intr.gen ? 355 cpumask_or(intrs, intrs, (dd->affinity->rcv_intr.gen ?
357 &dd->affinity->rcv_intr.mask : 356 &dd->affinity->rcv_intr.mask :
358 &dd->affinity->rcv_intr.used)); 357 &dd->affinity->rcv_intr.used));
359 scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(intrs)); 358 hfi1_cdbg(PROC, "CPUs used by interrupts: %*pbl",
360 hfi1_cdbg(PROC, "CPUs used by interrupts: %s", buf); 359 cpumask_pr_args(intrs));
361 360
362 /* 361 /*
363 * If we don't have a NUMA node requested, preference is towards 362 * If we don't have a NUMA node requested, preference is towards
@@ -366,18 +365,16 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
366 if (node == -1) 365 if (node == -1)
367 node = dd->node; 366 node = dd->node;
368 node_mask = cpumask_of_node(node); 367 node_mask = cpumask_of_node(node);
369 scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(node_mask)); 368 hfi1_cdbg(PROC, "device on NUMA %u, CPUs %*pbl", node,
370 hfi1_cdbg(PROC, "device on NUMA %u, CPUs %s", node, buf); 369 cpumask_pr_args(node_mask));
371 370
372 /* diff will hold all unused cpus */ 371 /* diff will hold all unused cpus */
373 cpumask_andnot(diff, &set->mask, &set->used); 372 cpumask_andnot(diff, &set->mask, &set->used);
374 scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(diff)); 373 hfi1_cdbg(PROC, "unused CPUs (all) %*pbl", cpumask_pr_args(diff));
375 hfi1_cdbg(PROC, "unused CPUs (all) %s", buf);
376 374
377 /* get cpumask of available CPUs on preferred NUMA */ 375 /* get cpumask of available CPUs on preferred NUMA */
378 cpumask_and(mask, diff, node_mask); 376 cpumask_and(mask, diff, node_mask);
379 scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask)); 377 hfi1_cdbg(PROC, "available cpus on NUMA %*pbl", cpumask_pr_args(mask));
380 hfi1_cdbg(PROC, "available cpus on NUMA %s", buf);
381 378
382 /* 379 /*
383 * At first, we don't want to place processes on the same 380 * At first, we don't want to place processes on the same
@@ -395,8 +392,8 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
395 cpumask_andnot(diff, &set->mask, &set->used); 392 cpumask_andnot(diff, &set->mask, &set->used);
396 cpumask_andnot(mask, diff, node_mask); 393 cpumask_andnot(mask, diff, node_mask);
397 } 394 }
398 scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask)); 395 hfi1_cdbg(PROC, "possible CPUs for process %*pbl",
399 hfi1_cdbg(PROC, "possible CPUs for process %s", buf); 396 cpumask_pr_args(mask));
400 397
401 cpu = cpumask_first(mask); 398 cpu = cpumask_first(mask);
402 if (cpu >= nr_cpu_ids) /* empty */ 399 if (cpu >= nr_cpu_ids) /* empty */
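
The affinity.c cleanup drops a 1 KiB stack buffer by using the kernel's
%*pbl printf extension, which renders a bitmap as a range list;
cpumask_pr_args() expands to the width/pointer pair the specifier consumes.
In isolation:

    /* prints e.g. "online CPUs: 0-3,8-11" with no bounce buffer */
    pr_info("online CPUs: %*pbl\n", cpumask_pr_args(cpu_online_mask));
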
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 3b876da745a1..f5de85178055 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -1037,7 +1037,7 @@ static void dc_shutdown(struct hfi1_devdata *);
1037static void dc_start(struct hfi1_devdata *); 1037static void dc_start(struct hfi1_devdata *);
1038static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp, 1038static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
1039 unsigned int *np); 1039 unsigned int *np);
1040static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd); 1040static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
1041 1041
1042/* 1042/*
1043 * Error interrupt table entry. This is used as input to the interrupt 1043 * Error interrupt table entry. This is used as input to the interrupt
@@ -6962,8 +6962,6 @@ void handle_link_down(struct work_struct *work)
6962 } 6962 }
6963 6963
6964 reset_neighbor_info(ppd); 6964 reset_neighbor_info(ppd);
6965 if (ppd->mgmt_allowed)
6966 remove_full_mgmt_pkey(ppd);
6967 6965
6968 /* disable the port */ 6966 /* disable the port */
6969 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); 6967 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
@@ -7070,12 +7068,16 @@ static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7070 __func__, ppd->pkeys[2], FULL_MGMT_P_KEY); 7068 __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
7071 ppd->pkeys[2] = FULL_MGMT_P_KEY; 7069 ppd->pkeys[2] = FULL_MGMT_P_KEY;
7072 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); 7070 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7071 hfi1_event_pkey_change(ppd->dd, ppd->port);
7073} 7072}
7074 7073
7075static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd) 7074static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7076{ 7075{
7077 ppd->pkeys[2] = 0; 7076 if (ppd->pkeys[2] != 0) {
7078 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); 7077 ppd->pkeys[2] = 0;
7078 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7079 hfi1_event_pkey_change(ppd->dd, ppd->port);
7080 }
7079} 7081}
7080 7082
7081/* 7083/*
@@ -7832,8 +7834,8 @@ static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7832 * save first 2 flits in the packet that caused 7834 * save first 2 flits in the packet that caused
7833 * the error 7835 * the error
7834 */ 7836 */
7835 dd->err_info_rcvport.packet_flit1 = hdr0; 7837 dd->err_info_rcvport.packet_flit1 = hdr0;
7836 dd->err_info_rcvport.packet_flit2 = hdr1; 7838 dd->err_info_rcvport.packet_flit2 = hdr1;
7837 } 7839 }
7838 switch (info) { 7840 switch (info) {
7839 case 1: 7841 case 1:
@@ -9168,6 +9170,13 @@ int start_link(struct hfi1_pportdata *ppd)
9168 return 0; 9170 return 0;
9169 } 9171 }
9170 9172
9173 /*
9174 * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
9175 * pkey table can be configured properly if the HFI unit is connected
9176 * to switch port with MgmtAllowed=NO
9177 */
9178 clear_full_mgmt_pkey(ppd);
9179
9171 return set_link_state(ppd, HLS_DN_POLL); 9180 return set_link_state(ppd, HLS_DN_POLL);
9172} 9181}
9173 9182
@@ -9777,7 +9786,7 @@ static void set_send_length(struct hfi1_pportdata *ppd)
9777 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2) 9786 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
9778 & SEND_LEN_CHECK1_LEN_VL15_MASK) << 9787 & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
9779 SEND_LEN_CHECK1_LEN_VL15_SHIFT; 9788 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
9780 int i; 9789 int i, j;
9781 u32 thres; 9790 u32 thres;
9782 9791
9783 for (i = 0; i < ppd->vls_supported; i++) { 9792 for (i = 0; i < ppd->vls_supported; i++) {
@@ -9801,7 +9810,10 @@ static void set_send_length(struct hfi1_pportdata *ppd)
9801 sc_mtu_to_threshold(dd->vld[i].sc, 9810 sc_mtu_to_threshold(dd->vld[i].sc,
9802 dd->vld[i].mtu, 9811 dd->vld[i].mtu,
9803 dd->rcd[0]->rcvhdrqentsize)); 9812 dd->rcd[0]->rcvhdrqentsize));
9804 sc_set_cr_threshold(dd->vld[i].sc, thres); 9813 for (j = 0; j < INIT_SC_PER_VL; j++)
9814 sc_set_cr_threshold(
9815 pio_select_send_context_vl(dd, j, i),
9816 thres);
9805 } 9817 }
9806 thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50), 9818 thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
9807 sc_mtu_to_threshold(dd->vld[15].sc, 9819 sc_mtu_to_threshold(dd->vld[15].sc,
@@ -11906,7 +11918,7 @@ static void update_synth_timer(unsigned long opaque)
11906 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit); 11918 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
11907 } 11919 }
11908 11920
11909mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME); 11921 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
11910} 11922}
11911 11923
11912#define C_MAX_NAME 13 /* 12 chars + one for /0 */ 11924#define C_MAX_NAME 13 /* 12 chars + one for /0 */
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 7a5b0e676cc7..c702a009608f 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -203,6 +203,9 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
203 203
204 switch (cmd) { 204 switch (cmd) {
205 case HFI1_IOCTL_ASSIGN_CTXT: 205 case HFI1_IOCTL_ASSIGN_CTXT:
206 if (uctxt)
207 return -EINVAL;
208
206 if (copy_from_user(&uinfo, 209 if (copy_from_user(&uinfo,
207 (struct hfi1_user_info __user *)arg, 210 (struct hfi1_user_info __user *)arg,
208 sizeof(uinfo))) 211 sizeof(uinfo)))
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 5cc492e5776d..eed971ccd2a1 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -1337,7 +1337,7 @@ static void cleanup_device_data(struct hfi1_devdata *dd)
1337 dma_free_coherent(&dd->pcidev->dev, sizeof(u64), 1337 dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
1338 (void *)dd->rcvhdrtail_dummy_kvaddr, 1338 (void *)dd->rcvhdrtail_dummy_kvaddr,
1339 dd->rcvhdrtail_dummy_physaddr); 1339 dd->rcvhdrtail_dummy_physaddr);
1340 dd->rcvhdrtail_dummy_kvaddr = NULL; 1340 dd->rcvhdrtail_dummy_kvaddr = NULL;
1341 } 1341 }
1342 1342
1343 for (ctxt = 0; tmp && ctxt < dd->num_rcv_contexts; ctxt++) { 1343 for (ctxt = 0; tmp && ctxt < dd->num_rcv_contexts; ctxt++) {
@@ -1383,7 +1383,7 @@ static void postinit_cleanup(struct hfi1_devdata *dd)
1383static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 1383static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1384{ 1384{
1385 int ret = 0, j, pidx, initfail; 1385 int ret = 0, j, pidx, initfail;
1386 struct hfi1_devdata *dd = NULL; 1386 struct hfi1_devdata *dd = ERR_PTR(-EINVAL);
1387 struct hfi1_pportdata *ppd; 1387 struct hfi1_pportdata *ppd;
1388 1388
1389 /* First, lock the non-writable module parameters */ 1389 /* First, lock the non-writable module parameters */
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index 219029576ba0..fca07a1d6c28 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -78,6 +78,16 @@ static inline void clear_opa_smp_data(struct opa_smp *smp)
 	memset(data, 0, size);
 }
 
+void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port)
+{
+	struct ib_event event;
+
+	event.event = IB_EVENT_PKEY_CHANGE;
+	event.device = &dd->verbs_dev.rdi.ibdev;
+	event.element.port_num = port;
+	ib_dispatch_event(&event);
+}
+
 static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len)
 {
 	struct ib_mad_send_buf *send_buf;
@@ -1418,15 +1428,10 @@ static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
 	}
 
 	if (changed) {
-		struct ib_event event;
-
 		(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
-
-		event.event = IB_EVENT_PKEY_CHANGE;
-		event.device = &dd->verbs_dev.rdi.ibdev;
-		event.element.port_num = port;
-		ib_dispatch_event(&event);
+		hfi1_event_pkey_change(dd, port);
 	}
+
 	return 0;
 }
 
diff --git a/drivers/infiniband/hw/hfi1/mad.h b/drivers/infiniband/hw/hfi1/mad.h
index 55ee08675333..8b734aaae88a 100644
--- a/drivers/infiniband/hw/hfi1/mad.h
+++ b/drivers/infiniband/hw/hfi1/mad.h
@@ -434,4 +434,6 @@ struct sc2vlnt {
 		 COUNTER_MASK(1, 3) | \
 		 COUNTER_MASK(1, 4))
 
+void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port);
+
 #endif /* _HFI1_MAD_H */
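
The two hfi1 hunks above move the open-coded P_Key change notification out of set_pkeys() and into the new hfi1_event_pkey_change() helper, so other call sites can raise the same event. For context, a ULP observes such events through the standard ib_core handler API; a minimal sketch of the receiving side (hypothetical handler name; INIT_IB_EVENT_HANDLER() and ib_register_event_handler() are the standard ib_core calls, and none of this is part of the patch itself):

	static void my_event_handler(struct ib_event_handler *handler,
				     struct ib_event *event)
	{
		/* hfi1_event_pkey_change() ends up here via ib_dispatch_event() */
		if (event->event == IB_EVENT_PKEY_CHANGE)
			pr_info("P_Key table changed on port %u\n",
				event->element.port_num);
	}

	/* registration, e.g. from a ULP's add-device callback: */
	INIT_IB_EVENT_HANDLER(&handler, device, my_event_handler);
	ib_register_event_handler(&handler);
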
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index d5edb1afbb8f..d4022450b73f 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -995,7 +995,7 @@ static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
 		/* counter is reset if occupancy count changes */
 		if (reg != reg_prev)
 			loop = 0;
-		if (loop > 500) {
+		if (loop > 50000) {
 			/* timed out - bounce the link */
 			dd_dev_err(dd,
 				   "%s: context %u(%u) timeout waiting for packets to egress, remaining count %u, bouncing link\n",
@@ -1798,6 +1798,21 @@ static void pio_map_rcu_callback(struct rcu_head *list)
 }
 
 /*
+ * Set credit return threshold for the kernel send context
+ */
+static void set_threshold(struct hfi1_devdata *dd, int scontext, int i)
+{
+	u32 thres;
+
+	thres = min(sc_percent_to_threshold(dd->kernel_send_context[scontext],
+					    50),
+		    sc_mtu_to_threshold(dd->kernel_send_context[scontext],
+					dd->vld[i].mtu,
+					dd->rcd[0]->rcvhdrqentsize));
+	sc_set_cr_threshold(dd->kernel_send_context[scontext], thres);
+}
+
+/*
  * pio_map_init - called when #vls change
  * @dd: hfi1_devdata
  * @port: port number
@@ -1872,11 +1887,16 @@ int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts)
 		if (!newmap->map[i])
 			goto bail;
 		newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
-		/* assign send contexts */
+		/*
+		 * assign send contexts and
+		 * adjust credit return threshold
+		 */
 		for (j = 0; j < sz; j++) {
-			if (dd->kernel_send_context[scontext])
+			if (dd->kernel_send_context[scontext]) {
 				newmap->map[i]->ksc[j] =
 					dd->kernel_send_context[scontext];
+				set_threshold(dd, scontext, i);
+			}
 			if (++scontext >= first_scontext +
 			    vl_scontexts[i])
 				/* wrap back to first send context */
diff --git a/drivers/infiniband/hw/hfi1/qsfp.c b/drivers/infiniband/hw/hfi1/qsfp.c
index 2441669f0817..9fb561682c66 100644
--- a/drivers/infiniband/hw/hfi1/qsfp.c
+++ b/drivers/infiniband/hw/hfi1/qsfp.c
@@ -579,7 +579,8 @@ int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len)
 
 	if (ppd->qsfp_info.cache_valid) {
 		if (QSFP_IS_CU(cache[QSFP_MOD_TECH_OFFS]))
-			sprintf(lenstr, "%dM ", cache[QSFP_MOD_LEN_OFFS]);
+			snprintf(lenstr, sizeof(lenstr), "%dM ",
+				 cache[QSFP_MOD_LEN_OFFS]);
 
 		power_byte = cache[QSFP_MOD_PWR_OFFS];
 		sofar += scnprintf(buf + sofar, len - sofar, "PWR:%.3sW\n",
diff --git a/drivers/infiniband/hw/hfi1/trace.c b/drivers/infiniband/hw/hfi1/trace.c
index 79b2952c0dfb..4cfb13771897 100644
--- a/drivers/infiniband/hw/hfi1/trace.c
+++ b/drivers/infiniband/hw/hfi1/trace.c
@@ -214,19 +214,6 @@ const char *print_u32_array(
 	return ret;
 }
 
-const char *print_u64_array(
-	struct trace_seq *p,
-	u64 *arr, int len)
-{
-	int i;
-	const char *ret = trace_seq_buffer_ptr(p);
-
-	for (i = 0; i < len; i++)
-		trace_seq_printf(p, "%s0x%016llx", i == 0 ? "" : " ", arr[i]);
-	trace_seq_putc(p, 0);
-	return ret;
-}
-
 __hfi1_trace_fn(PKT);
 __hfi1_trace_fn(PROC);
 __hfi1_trace_fn(SDMA);
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 29f4795f866c..47ffd273ecbd 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -183,7 +183,7 @@ struct user_sdma_iovec {
 	struct sdma_mmu_node *node;
 };
 
-#define SDMA_CACHE_NODE_EVICT BIT(0)
+#define SDMA_CACHE_NODE_EVICT 0
 
 struct sdma_mmu_node {
 	struct mmu_rb_node rb;
@@ -1355,11 +1355,11 @@ static int set_txreq_header(struct user_sdma_request *req,
 		 */
 		SDMA_DBG(req, "TID offset %ubytes %uunits om%u",
 			 req->tidoffset, req->tidoffset / req->omfactor,
-			 !!(req->omfactor - KDETH_OM_SMALL));
+			 req->omfactor != KDETH_OM_SMALL);
 		KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET,
 			  req->tidoffset / req->omfactor);
 		KDETH_SET(hdr->kdeth.ver_tid_offset, OM,
-			  !!(req->omfactor - KDETH_OM_SMALL));
+			  req->omfactor != KDETH_OM_SMALL);
 	}
 done:
 	trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt,
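
The user_sdma.c hunk above is behavior-preserving: with only two legal OM factors, !!(req->omfactor - KDETH_OM_SMALL) and req->omfactor != KDETH_OM_SMALL both yield 0 or 1; the comparison just says what is meant. A standalone check of the equivalence (userspace C; the KDETH_OM_SMALL value here is a placeholder, the real constant lives in the hfi1 headers):

	#include <assert.h>

	#define KDETH_OM_SMALL 4	/* placeholder for the driver's constant */

	int main(void)
	{
		unsigned int omfactor;

		/* both forms collapse any non-zero difference to 1 */
		for (omfactor = 0; omfactor < 128; omfactor++)
			assert(!!(omfactor - KDETH_OM_SMALL) ==
			       (omfactor != KDETH_OM_SMALL));
		return 0;
	}
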
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.c b/drivers/infiniband/hw/hfi1/verbs_txreq.c
index bc95c4112c61..d8fb056526f8 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.c
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.c
@@ -92,11 +92,10 @@ void hfi1_put_txreq(struct verbs_txreq *tx)
 
 struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
 				struct rvt_qp *qp)
+	__must_hold(&qp->s_lock)
 {
 	struct verbs_txreq *tx = ERR_PTR(-EBUSY);
-	unsigned long flags;
 
-	spin_lock_irqsave(&qp->s_lock, flags);
 	write_seqlock(&dev->iowait_lock);
 	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
 		struct hfi1_qp_priv *priv;
@@ -116,7 +115,6 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
 	}
 out:
 	write_sequnlock(&dev->iowait_lock);
-	spin_unlock_irqrestore(&qp->s_lock, flags);
 	return tx;
 }
 
122 120
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h
index 1cf69b2fe4a5..a1d6e0807f97 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.h
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h
@@ -73,6 +73,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
 
 static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
 					    struct rvt_qp *qp)
+	__must_hold(&qp->slock)
 {
 	struct verbs_txreq *tx;
 	struct hfi1_qp_priv *priv = qp->priv;
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index 8b9532034558..b738acdb9b02 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -113,6 +113,8 @@
 
 #define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types)
 #define IW_CFG_FPM_QP_COUNT 32768
+#define I40IW_MAX_PAGES_PER_FMR 512
+#define I40IW_MIN_PAGES_PER_FMR 1
 
 #define I40IW_MTU_TO_MSS 40
 #define I40IW_DEFAULT_MSS 1460
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 02a735b64208..33959ed14563 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -79,6 +79,7 @@ static int i40iw_query_device(struct ib_device *ibdev,
 	props->max_qp_init_rd_atom = props->max_qp_rd_atom;
 	props->atomic_cap = IB_ATOMIC_NONE;
 	props->max_map_per_fmr = 1;
+	props->max_fast_reg_page_list_len = I40IW_MAX_PAGES_PER_FMR;
 	return 0;
 }
 
@@ -1527,7 +1528,7 @@ static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd,
 	mutex_lock(&iwdev->pbl_mutex);
 	status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
 	mutex_unlock(&iwdev->pbl_mutex);
-	if (!status)
+	if (status)
 		goto err1;
 
 	if (palloc->level != I40IW_LEVEL_1)
@@ -2149,6 +2150,7 @@ static int i40iw_post_send(struct ib_qp *ibqp,
 			struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;
 			struct i40iw_fast_reg_stag_info info;
 
+			memset(&info, 0, sizeof(info));
 			info.access_rights = I40IW_ACCESS_FLAGS_LOCALREAD;
 			info.access_rights |= i40iw_get_user_access(flags);
 			info.stag_key = reg_wr(ib_wr)->key & 0xff;
@@ -2158,10 +2160,14 @@ static int i40iw_post_send(struct ib_qp *ibqp,
 			info.addr_type = I40IW_ADDR_TYPE_VA_BASED;
 			info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
 			info.total_len = iwmr->ibmr.length;
+			info.reg_addr_pa = *(u64 *)palloc->level1.addr;
 			info.first_pm_pbl_index = palloc->level1.idx;
 			info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
 			info.signaled = ib_wr->send_flags & IB_SEND_SIGNALED;
 
+			if (iwmr->npages > I40IW_MIN_PAGES_PER_FMR)
+				info.chunk_size = 1;
+
 			if (page_shift == 21)
 				info.page_size = 1; /* 2M page */
 
@@ -2327,13 +2333,16 @@ static int i40iw_req_notify_cq(struct ib_cq *ibcq,
 {
 	struct i40iw_cq *iwcq;
 	struct i40iw_cq_uk *ukcq;
-	enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_SOLICITED;
+	unsigned long flags;
+	enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_EVENT;
 
 	iwcq = (struct i40iw_cq *)ibcq;
 	ukcq = &iwcq->sc_cq.cq_uk;
-	if (notify_flags == IB_CQ_NEXT_COMP)
-		cq_notify = IW_CQ_COMPL_EVENT;
+	if (notify_flags == IB_CQ_SOLICITED)
+		cq_notify = IW_CQ_COMPL_SOLICITED;
+	spin_lock_irqsave(&iwcq->lock, flags);
 	ukcq->ops.iw_cq_request_notification(ukcq, cq_notify);
+	spin_unlock_irqrestore(&iwcq->lock, flags);
 	return 0;
 }
 
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index 105246fba2e7..5fc623362731 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -47,6 +47,7 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
 
 	ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
 	ah->av.ib.g_slid = ah_attr->src_path_bits;
+	ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
 	if (ah_attr->ah_flags & IB_AH_GRH) {
 		ah->av.ib.g_slid |= 0x80;
 		ah->av.ib.gid_index = ah_attr->grh.sgid_index;
@@ -64,7 +65,6 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
 		    !(1 << ah->av.ib.stat_rate & dev->caps.stat_rate_support))
 			--ah->av.ib.stat_rate;
 	}
-	ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
 
 	return &ah->ibah;
 }
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index d68f506c1922..9c2e53d28f98 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -527,7 +527,7 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
 	tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
 	spin_unlock(&tun_qp->tx_lock);
 	if (ret)
-		goto out;
+		goto end;
 
 	tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
 	if (tun_qp->tx_ring[tun_tx_ix].ah)
@@ -596,9 +596,15 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
 	wr.wr.send_flags = IB_SEND_SIGNALED;
 
 	ret = ib_post_send(src_qp, &wr.wr, &bad_wr);
-out:
-	if (ret)
-		ib_destroy_ah(ah);
+	if (!ret)
+		return 0;
+out:
+	spin_lock(&tun_qp->tx_lock);
+	tun_qp->tx_ix_tail++;
+	spin_unlock(&tun_qp->tx_lock);
+	tun_qp->tx_ring[tun_tx_ix].ah = NULL;
+end:
+	ib_destroy_ah(ah);
 	return ret;
 }
 
@@ -1326,9 +1332,15 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
 
 
 	ret = ib_post_send(send_qp, &wr.wr, &bad_wr);
+	if (!ret)
+		return 0;
+
+	spin_lock(&sqp->tx_lock);
+	sqp->tx_ix_tail++;
+	spin_unlock(&sqp->tx_lock);
+	sqp->tx_ring[wire_tx_ix].ah = NULL;
 out:
-	if (ret)
-		ib_destroy_ah(ah);
+	ib_destroy_ah(ah);
 	return ret;
 }
 
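
Both mlx4/mad.c hunks above fix the same leak: a tunnel/wire slot is reserved by bumping tx_ix_head before ib_post_send(), and on a post failure nothing ever completes, so the slot and its cached AH were lost. The fix returns early on success and otherwise advances tx_ix_tail under the lock to hand the slot back. A standalone sketch of that head/tail accounting (simplified stand-in types, not the driver's actual structures):

	#include <assert.h>

	#define RING_SIZE 16	/* stands in for MLX4_NUM_TUNNEL_BUFS, a power of two */

	struct ring { unsigned int head, tail; };

	/* reserve a slot, mirroring "ix = (++head) & (size - 1)" in the driver */
	static int ring_reserve(struct ring *r, unsigned int *ix)
	{
		if (r->head - r->tail >= RING_SIZE)
			return -1;		/* ring full */
		*ix = (++r->head) & (RING_SIZE - 1);
		return 0;
	}

	/* on post failure there is no completion, so advance tail by hand */
	static void ring_rollback(struct ring *r)
	{
		r->tail++;
	}

	int main(void)
	{
		struct ring r = { 0, 0 };
		unsigned int ix;

		assert(ring_reserve(&r, &ix) == 0);
		ring_rollback(&r);		/* pretend ib_post_send() failed */
		assert(r.head - r.tail == 0);	/* slot is available again */
		return 0;
	}
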
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index b01ef6eee6e8..42a46078d7d5 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -505,9 +505,9 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
 		else
 			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
-		if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
-			props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
 	}
+	if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
+		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
 
 	props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
 
@@ -1704,6 +1704,9 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
 	struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
 	int is_bonded = mlx4_is_bonded(dev);
 
+	if (flow_attr->port < 1 || flow_attr->port > qp->device->phys_port_cnt)
+		return ERR_PTR(-EINVAL);
+
 	if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
 	    (flow_attr->type != IB_FLOW_ATTR_NORMAL))
 		return ERR_PTR(-EOPNOTSUPP);
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 6c5ac5d8f32f..29acda249612 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -139,7 +139,7 @@ struct mlx4_ib_mr {
 	u32			max_pages;
 	struct mlx4_mr		mmr;
 	struct ib_umem	       *umem;
-	void			*pages_alloc;
+	size_t			page_map_size;
 };
 
 struct mlx4_ib_mw {
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 631272172a0b..5d73989d9771 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -277,20 +277,23 @@ mlx4_alloc_priv_pages(struct ib_device *device,
 			   struct mlx4_ib_mr *mr,
 			   int max_pages)
 {
-	int size = max_pages * sizeof(u64);
-	int add_size;
 	int ret;
 
-	add_size = max_t(int, MLX4_MR_PAGES_ALIGN - ARCH_KMALLOC_MINALIGN, 0);
+	/* Ensure that size is aligned to DMA cacheline
+	 * requirements.
+	 * max_pages is limited to MLX4_MAX_FAST_REG_PAGES
+	 * so page_map_size will never cross PAGE_SIZE.
+	 */
+	mr->page_map_size = roundup(max_pages * sizeof(u64),
+				    MLX4_MR_PAGES_ALIGN);
 
-	mr->pages_alloc = kzalloc(size + add_size, GFP_KERNEL);
-	if (!mr->pages_alloc)
+	/* Prevent cross page boundary allocation. */
+	mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL);
+	if (!mr->pages)
 		return -ENOMEM;
 
-	mr->pages = PTR_ALIGN(mr->pages_alloc, MLX4_MR_PAGES_ALIGN);
-
 	mr->page_map = dma_map_single(device->dma_device, mr->pages,
-				      size, DMA_TO_DEVICE);
+				      mr->page_map_size, DMA_TO_DEVICE);
 
 	if (dma_mapping_error(device->dma_device, mr->page_map)) {
 		ret = -ENOMEM;
@@ -298,9 +301,9 @@ mlx4_alloc_priv_pages(struct ib_device *device,
 	}
 
 	return 0;
-err:
-	kfree(mr->pages_alloc);
 
+err:
+	free_page((unsigned long)mr->pages);
 	return ret;
 }
 
@@ -309,11 +312,10 @@ mlx4_free_priv_pages(struct mlx4_ib_mr *mr)
 {
 	if (mr->pages) {
 		struct ib_device *device = mr->ibmr.device;
-		int size = mr->max_pages * sizeof(u64);
 
 		dma_unmap_single(device->dma_device, mr->page_map,
-				 size, DMA_TO_DEVICE);
+				 mr->page_map_size, DMA_TO_DEVICE);
-		kfree(mr->pages_alloc);
+		free_page((unsigned long)mr->pages);
 		mr->pages = NULL;
 	}
 }
@@ -537,14 +539,12 @@ int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
 	mr->npages = 0;
 
 	ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map,
-				   sizeof(u64) * mr->max_pages,
-				   DMA_TO_DEVICE);
+				   mr->page_map_size, DMA_TO_DEVICE);
 
 	rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page);
 
 	ib_dma_sync_single_for_device(ibmr->device, mr->page_map,
-				      sizeof(u64) * mr->max_pages,
-				      DMA_TO_DEVICE);
+				      mr->page_map_size, DMA_TO_DEVICE);
 
 	return rc;
 }
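
The mlx4/mr.c rework above drops the over-allocated kzalloc()-plus-PTR_ALIGN scheme in favour of one zeroed page, and records the mapped length in page_map_size so dma_map_single()/dma_unmap_single() and the dma_sync calls all agree. The new comment's claim that the rounded size never crosses PAGE_SIZE is easy to verify; a standalone check (userspace C, assuming MLX4_MAX_FAST_REG_PAGES is 511 and MLX4_MR_PAGES_ALIGN is 0x40, the values I would expect from the mlx4 headers of this era):

	#include <assert.h>
	#include <stdint.h>

	#define MLX4_MAX_FAST_REG_PAGES 511	/* assumed cap on max_pages */
	#define MLX4_MR_PAGES_ALIGN 0x40	/* assumed alignment requirement */
	#define PAGE_SIZE 4096

	static size_t roundup_to(size_t x, size_t a)
	{
		return (x + a - 1) / a * a;	/* same result as the kernel's roundup() */
	}

	int main(void)
	{
		int max_pages;

		for (max_pages = 1; max_pages <= MLX4_MAX_FAST_REG_PAGES; max_pages++) {
			size_t page_map_size = roundup_to(max_pages * sizeof(uint64_t),
							  MLX4_MR_PAGES_ALIGN);
			assert(page_map_size <= PAGE_SIZE);	/* always fits one page */
		}
		return 0;
	}
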
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 81b0e1fbec1d..8db8405c1e99 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -362,7 +362,7 @@ static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
 			sizeof (struct mlx4_wqe_raddr_seg);
 	case MLX4_IB_QPT_RC:
 		return sizeof (struct mlx4_wqe_ctrl_seg) +
-			sizeof (struct mlx4_wqe_atomic_seg) +
+			sizeof (struct mlx4_wqe_masked_atomic_seg) +
 			sizeof (struct mlx4_wqe_raddr_seg);
 	case MLX4_IB_QPT_SMI:
 	case MLX4_IB_QPT_GSI:
@@ -1191,8 +1191,10 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
 	{
 		err = create_qp_common(to_mdev(pd->device), pd, init_attr,
 				       udata, 0, &qp, gfp);
-		if (err)
+		if (err) {
+			kfree(qp);
 			return ERR_PTR(err);
+		}
 
 		qp->ibqp.qp_num = qp->mqp.qpn;
 		qp->xrcdn = xrcdn;
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index dabcc65bd65e..9c0e67bd2ba7 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -822,7 +822,8 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
 	int eqn;
 	int err;
 
-	if (entries < 0)
+	if (entries < 0 ||
+	    (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))))
 		return ERR_PTR(-EINVAL);
 
 	if (check_cq_create_flags(attr->flags))
@@ -1168,11 +1169,16 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 		return -ENOSYS;
 	}
 
-	if (entries < 1)
+	if (entries < 1 ||
+	    entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
+		mlx5_ib_warn(dev, "wrong entries number %d, max %d\n",
+			     entries,
+			     1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz));
 		return -EINVAL;
+	}
 
 	entries = roundup_pow_of_two(entries + 1);
 	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
 		return -EINVAL;
 
 	if (entries == ibcq->cqe + 1)
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 1534af113058..364aab9f3c9e 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -121,7 +121,7 @@ static void pma_cnt_ext_assign(struct ib_pma_portcounters_ext *pma_cnt_ext,
 	pma_cnt_ext->port_xmit_data =
 		cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.octets,
 					 transmitted_ib_multicast.octets) >> 2);
-	pma_cnt_ext->port_xmit_data =
+	pma_cnt_ext->port_rcv_data =
 		cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.octets,
 					 received_ib_multicast.octets) >> 2);
 	pma_cnt_ext->port_xmit_packets =
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index c72797cd9e4f..b48ad85315dc 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -524,6 +524,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 	    MLX5_CAP_ETH(dev->mdev, scatter_fcs))
 		props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
 
+	if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
+		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
+
 	props->vendor_part_id = mdev->pdev->device;
 	props->hw_ver = mdev->pdev->revision;
 
@@ -915,7 +918,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
 	gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
 	resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
-	resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
+	if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
+		resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
 	resp.cache_line_size = L1_CACHE_BYTES;
 	resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
 	resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
@@ -988,7 +992,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	if (field_avail(typeof(resp), cqe_version, udata->outlen))
 		resp.response_length += sizeof(resp.cqe_version);
 
-	if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
+	/*
+	 * We don't want to expose information from the PCI bar that is located
+	 * after 4096 bytes, so if the arch only supports larger pages, let's
+	 * pretend we don't support reading the HCA's core clock. This is also
+	 * forced by mmap function.
+	 */
+	if (PAGE_SIZE <= 4096 &&
+	    field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
 		resp.comp_mask |=
 			MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
 		resp.hca_core_clock_offset =
@@ -1798,7 +1809,7 @@ static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
 {
 	struct mlx5_ib_dev *dev =
 		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
-	return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(dev->mdev),
+	return sprintf(buf, "%d.%d.%04d\n", fw_rev_maj(dev->mdev),
 		       fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
 }
 
@@ -1866,14 +1877,11 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
 		break;
 
 	case MLX5_DEV_EVENT_PORT_DOWN:
+	case MLX5_DEV_EVENT_PORT_INITIALIZED:
 		ibev.event = IB_EVENT_PORT_ERR;
 		port = (u8)param;
 		break;
 
-	case MLX5_DEV_EVENT_PORT_INITIALIZED:
-		/* not used by ULPs */
-		return;
-
 	case MLX5_DEV_EVENT_LID_CHANGE:
 		ibev.event = IB_EVENT_LID_CHANGE;
 		port = (u8)param;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 504117657d41..ce0a7ab35a22 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -235,6 +235,8 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
 		qp->rq.max_gs = 0;
 		qp->rq.wqe_cnt = 0;
 		qp->rq.wqe_shift = 0;
+		cap->max_recv_wr = 0;
+		cap->max_recv_sge = 0;
 	} else {
 		if (ucmd) {
 			qp->rq.wqe_cnt = ucmd->rq_wqe_count;
@@ -1851,13 +1853,15 @@ static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
 static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 			 const struct ib_ah_attr *ah,
 			 struct mlx5_qp_path *path, u8 port, int attr_mask,
-			 u32 path_flags, const struct ib_qp_attr *attr)
+			 u32 path_flags, const struct ib_qp_attr *attr,
+			 bool alt)
 {
 	enum rdma_link_layer ll = rdma_port_get_link_layer(&dev->ib_dev, port);
 	int err;
 
 	if (attr_mask & IB_QP_PKEY_INDEX)
-		path->pkey_index = attr->pkey_index;
+		path->pkey_index = cpu_to_be16(alt ? attr->alt_pkey_index :
+					       attr->pkey_index);
 
 	if (ah->ah_flags & IB_AH_GRH) {
 		if (ah->grh.sgid_index >=
@@ -1877,9 +1881,9 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 			       ah->grh.sgid_index);
 		path->dci_cfi_prio_sl = (ah->sl & 0x7) << 4;
 	} else {
-		path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
-		path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 :
-									0;
+		path->fl_free_ar = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
+		path->fl_free_ar |=
+			(path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x40 : 0;
 		path->rlid = cpu_to_be16(ah->dlid);
 		path->grh_mlid = ah->src_path_bits & 0x7f;
 		if (ah->ah_flags & IB_AH_GRH)
@@ -1903,7 +1907,7 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 	path->port = port;
 
 	if (attr_mask & IB_QP_TIMEOUT)
-		path->ackto_lt = attr->timeout << 3;
+		path->ackto_lt = (alt ? attr->alt_timeout : attr->timeout) << 3;
 
 	if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt)
 		return modify_raw_packet_eth_prio(dev->mdev,
@@ -2264,7 +2268,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 	context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);
 
 	if (attr_mask & IB_QP_PKEY_INDEX)
-		context->pri_path.pkey_index = attr->pkey_index;
+		context->pri_path.pkey_index = cpu_to_be16(attr->pkey_index);
 
 	/* todo implement counter_index functionality */
 
@@ -2277,7 +2281,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 	if (attr_mask & IB_QP_AV) {
 		err = mlx5_set_path(dev, qp, &attr->ah_attr, &context->pri_path,
 				    attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
-				    attr_mask, 0, attr);
+				    attr_mask, 0, attr, false);
 		if (err)
 			goto out;
 	}
@@ -2288,7 +2292,9 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 	if (attr_mask & IB_QP_ALT_PATH) {
 		err = mlx5_set_path(dev, qp, &attr->alt_ah_attr,
 				    &context->alt_path,
-				    attr->alt_port_num, attr_mask, 0, attr);
+				    attr->alt_port_num,
+				    attr_mask | IB_QP_PKEY_INDEX | IB_QP_TIMEOUT,
+				    0, attr, true);
 		if (err)
 			goto out;
 	}
@@ -3326,10 +3332,11 @@ static u8 get_fence(u8 fence, struct ib_send_wr *wr)
 			return MLX5_FENCE_MODE_SMALL_AND_FENCE;
 		else
 			return fence;
-
-	} else {
-		return 0;
+	} else if (unlikely(wr->send_flags & IB_SEND_FENCE)) {
+		return MLX5_FENCE_MODE_FENCE;
 	}
+
+	return 0;
 }
 
 static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
@@ -4013,11 +4020,12 @@ static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
 		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
 		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
-		qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f;
+		qp_attr->alt_pkey_index =
+			be16_to_cpu(context->alt_path.pkey_index);
 		qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
 	}
 
-	qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f;
+	qp_attr->pkey_index = be16_to_cpu(context->pri_path.pkey_index);
 	qp_attr->port_num = context->pri_path.port;
 
 	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
@@ -4079,17 +4087,19 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
 	qp_attr->cap.max_recv_sge = qp->rq.max_gs;
 
 	if (!ibqp->uobject) {
-		qp_attr->cap.max_send_wr = qp->sq.wqe_cnt;
+		qp_attr->cap.max_send_wr = qp->sq.max_post;
 		qp_attr->cap.max_send_sge = qp->sq.max_gs;
+		qp_init_attr->qp_context = ibqp->qp_context;
 	} else {
 		qp_attr->cap.max_send_wr = 0;
 		qp_attr->cap.max_send_sge = 0;
 	}
 
-	/* We don't support inline sends for kernel QPs (yet), and we
-	 * don't know what userspace's value should be.
-	 */
-	qp_attr->cap.max_inline_data = 0;
+	qp_init_attr->qp_type = ibqp->qp_type;
+	qp_init_attr->recv_cq = ibqp->recv_cq;
+	qp_init_attr->send_cq = ibqp->send_cq;
+	qp_init_attr->srq = ibqp->srq;
+	qp_attr->cap.max_inline_data = qp->max_inline_data;
 
 	qp_init_attr->cap = qp_attr->cap;
 
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index ff946d5f59e4..382466a90da7 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -2178,6 +2178,11 @@ static ssize_t qib_write(struct file *fp, const char __user *data,
 
 	switch (cmd.type) {
 	case QIB_CMD_ASSIGN_CTXT:
+		if (rcd) {
+			ret = -EINVAL;
+			goto bail;
+		}
+
 		ret = qib_assign_ctxt(fp, &cmd.cmd.user_info);
 		if (ret)
 			goto bail;
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index 7209fbc03ccb..a0b6ebee4d8a 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -36,7 +36,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/sched.h>
 #include <linux/hugetlb.h>
-#include <linux/dma-attrs.h>
 #include <linux/iommu.h>
 #include <linux/workqueue.h>
 #include <linux/list.h>
@@ -112,10 +111,6 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
 	int i;
 	int flags;
 	dma_addr_t pa;
-	DEFINE_DMA_ATTRS(attrs);
-
-	if (dmasync)
-		dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
 
 	if (!can_do_mlock())
 		return -EPERM;
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 5fa4d4d81ee0..41ba7e9cadaa 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -369,8 +369,8 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
 			/* wrap to first map page, invert bit 0 */
 			offset = qpt->incr | ((offset & 1) ^ 1);
 		}
-		/* there can be no bits at shift and below */
-		WARN_ON(offset & (rdi->dparms.qos_shift - 1));
+		/* there can be no set bits in low-order QoS bits */
+		WARN_ON(offset & (BIT(rdi->dparms.qos_shift) - 1));
 		qpn = mk_qpn(qpt, map, offset);
 	}
 
@@ -502,6 +502,12 @@ static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
  */
 static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
 			 enum ib_qp_type type)
+	__releases(&qp->s_lock)
+	__releases(&qp->s_hlock)
+	__releases(&qp->r_lock)
+	__acquires(&qp->r_lock)
+	__acquires(&qp->s_hlock)
+	__acquires(&qp->s_lock)
 {
 	if (qp->state != IB_QPS_RESET) {
 		qp->state = IB_QPS_RESET;
@@ -570,12 +576,6 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
 	qp->s_ssn = 1;
 	qp->s_lsn = 0;
 	qp->s_mig_state = IB_MIG_MIGRATED;
-	if (qp->s_ack_queue)
-		memset(
-			qp->s_ack_queue,
-			0,
-			rvt_max_atomic(rdi) *
-			sizeof(*qp->s_ack_queue));
 	qp->r_head_ack_queue = 0;
 	qp->s_tail_ack_queue = 0;
 	qp->s_num_rd_atomic = 0;
@@ -699,8 +699,10 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
 	 * initialization that is needed.
 	 */
 	priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp);
-	if (!priv)
+	if (IS_ERR(priv)) {
+		ret = priv;
 		goto bail_qp;
+	}
 	qp->priv = priv;
 	qp->timeout_jiffies =
 		usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index e1cc2cc42f25..30c4fda7a05a 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -501,9 +501,7 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb)
 		    !rdi->driver_f.quiesce_qp ||
 		    !rdi->driver_f.notify_error_qp ||
 		    !rdi->driver_f.mtu_from_qp ||
-		    !rdi->driver_f.mtu_to_path_mtu ||
-		    !rdi->driver_f.shut_down_port ||
-		    !rdi->driver_f.cap_mask_chg)
+		    !rdi->driver_f.mtu_to_path_mtu)
 			return -EINVAL;
 		break;
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index bab7db6fa9ab..4f7d9b48df64 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -94,6 +94,7 @@ enum {
 	IPOIB_NEIGH_TBL_FLUSH = 12,
 	IPOIB_FLAG_DEV_ADDR_SET = 13,
 	IPOIB_FLAG_DEV_ADDR_CTRL = 14,
+	IPOIB_FLAG_GOING_DOWN = 15,
 
 	IPOIB_MAX_BACKOFF_SECONDS = 16,
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index b2f42835d76d..951d9abcca8b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1486,6 +1486,10 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
 {
 	struct net_device *dev = to_net_dev(d);
 	int ret;
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+	if (test_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags))
+		return -EPERM;
 
 	if (!rtnl_trylock())
 		return restart_syscall();
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 45c40a17d6a6..dc6d241b9406 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -1015,7 +1015,7 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
 	if (ib_query_gid(priv->ca, priv->port, 0, &gid0, NULL))
 		return false;
 
-	netif_addr_lock(priv->dev);
+	netif_addr_lock_bh(priv->dev);
 
 	/* The subnet prefix may have changed, update it now so we won't have
 	 * to do it later
@@ -1026,12 +1026,12 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
 
 	search_gid.global.interface_id = priv->local_gid.global.interface_id;
 
-	netif_addr_unlock(priv->dev);
+	netif_addr_unlock_bh(priv->dev);
 
 	err = ib_find_gid(priv->ca, &search_gid, IB_GID_TYPE_IB,
 			  priv->dev, &port, &index);
 
-	netif_addr_lock(priv->dev);
+	netif_addr_lock_bh(priv->dev);
 
 	if (search_gid.global.interface_id !=
 	    priv->local_gid.global.interface_id)
@@ -1092,7 +1092,7 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
 	}
 
 out:
-	netif_addr_unlock(priv->dev);
+	netif_addr_unlock_bh(priv->dev);
 
 	return ret;
 }
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 2d7c16346648..5f58c41ef787 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1206,7 +1206,9 @@ struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
 				neigh = NULL;
 				goto out_unlock;
 			}
-			neigh->alive = jiffies;
+
+			if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE))
+				neigh->alive = jiffies;
 			goto out_unlock;
 		}
 	}
@@ -1851,7 +1853,7 @@ static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid)
 	struct ipoib_dev_priv *child_priv;
 	struct net_device *netdev = priv->dev;
 
-	netif_addr_lock(netdev);
+	netif_addr_lock_bh(netdev);
 
 	memcpy(&priv->local_gid.global.interface_id,
 	       &gid->global.interface_id,
@@ -1859,7 +1861,7 @@ static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid)
 	memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid));
 	clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
 
-	netif_addr_unlock(netdev);
+	netif_addr_unlock_bh(netdev);
 
 	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
 		down_read(&priv->vlan_rwsem);
@@ -1875,7 +1877,7 @@ static int ipoib_check_lladdr(struct net_device *dev,
 	union ib_gid *gid = (union ib_gid *)(ss->__data + 4);
 	int ret = 0;
 
-	netif_addr_lock(dev);
+	netif_addr_lock_bh(dev);
 
 	/* Make sure the QPN, reserved and subnet prefix match the current
 	 * lladdr, it also makes sure the lladdr is unicast.
@@ -1885,7 +1887,7 @@ static int ipoib_check_lladdr(struct net_device *dev,
 	    gid->global.interface_id == 0)
 		ret = -EINVAL;
 
-	netif_addr_unlock(dev);
+	netif_addr_unlock_bh(dev);
 
 	return ret;
 }
@@ -2141,6 +2143,9 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
 	ib_unregister_event_handler(&priv->event_handler);
 	flush_workqueue(ipoib_workqueue);
 
+	/* mark interface in the middle of destruction */
+	set_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags);
+
 	rtnl_lock();
 	dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
 	rtnl_unlock();
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 82fbc9442608..d3394b6add24 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -582,13 +582,13 @@ void ipoib_mcast_join_task(struct work_struct *work)
 		return;
 	}
 	priv->local_lid = port_attr.lid;
-	netif_addr_lock(dev);
+	netif_addr_lock_bh(dev);
 
 	if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
-		netif_addr_unlock(dev);
+		netif_addr_unlock_bh(dev);
 		return;
 	}
-	netif_addr_unlock(dev);
+	netif_addr_unlock_bh(dev);
 
 	spin_lock_irq(&priv->lock);
 	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 64a35595eab8..a2f9f29c6ab5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -131,6 +131,9 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
 
 	ppriv = netdev_priv(pdev);
 
+	if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags))
+		return -EPERM;
+
 	snprintf(intf_name, sizeof intf_name, "%s.%04x",
 		 ppriv->dev->name, pkey);
 	priv = ipoib_intf_alloc(intf_name);
@@ -183,6 +186,9 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
 
 	ppriv = netdev_priv(pdev);
 
+	if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags))
+		return -EPERM;
+
 	if (!rtnl_trylock())
 		return restart_syscall();
 
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 646de170ec12..3322ed750172 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -1457,7 +1457,6 @@ static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
 {
 	unsigned int sg_offset = 0;
 
-	state->desc = req->indirect_desc;
 	state->fr.next = req->fr_list;
 	state->fr.end = req->fr_list + ch->target->mr_per_cmd;
 	state->sg = scat;
@@ -1489,7 +1488,6 @@ static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
 	struct scatterlist *sg;
 	int i;
 
-	state->desc = req->indirect_desc;
 	for_each_sg(scat, sg, count, i) {
 		srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
 			     ib_sg_dma_len(dev->dev, sg),
@@ -1655,6 +1653,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
 			target->indirect_size, DMA_TO_DEVICE);
 
 	memset(&state, 0, sizeof(state));
+	state.desc = req->indirect_desc;
 	if (dev->use_fast_reg)
 		ret = srp_map_sg_fr(&state, ch, req, scat, count);
 	else if (dev->use_fmr)
@@ -3526,7 +3525,7 @@ static void srp_add_one(struct ib_device *device)
 	int mr_page_shift, p;
 	u64 max_pages_per_mr;
 
-	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
+	srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
 	if (!srp_dev)
 		return;
 
@@ -3586,8 +3585,6 @@ static void srp_add_one(struct ib_device *device)
 						   IB_ACCESS_REMOTE_WRITE);
 		if (IS_ERR(srp_dev->global_mr))
 			goto err_pd;
-	} else {
-		srp_dev->global_mr = NULL;
 	}
 
 	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index e68b20cba70b..4a4155640d51 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1638,8 +1638,7 @@ retry:
 	 */
 	qp_init->cap.max_send_wr = srp_sq_size / 2;
 	qp_init->cap.max_rdma_ctxs = srp_sq_size / 2;
-	qp_init->cap.max_send_sge = max(sdev->device->attrs.max_sge_rd,
-					sdev->device->attrs.max_sge);
+	qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE;
 	qp_init->port_num = ch->sport->port;
 
 	ch->qp = ib_create_qp(sdev->pd, qp_init);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index fee6bfd7ca21..389030487da7 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -106,6 +106,7 @@ enum {
106 SRP_LOGIN_RSP_MULTICHAN_MAINTAINED = 0x2, 106 SRP_LOGIN_RSP_MULTICHAN_MAINTAINED = 0x2,
107 107
108 SRPT_DEF_SG_TABLESIZE = 128, 108 SRPT_DEF_SG_TABLESIZE = 128,
109 SRPT_DEF_SG_PER_WQE = 16,
109 110
110 MIN_SRPT_SQ_SIZE = 16, 111 MIN_SRPT_SQ_SIZE = 16,
111 DEF_SRPT_SQ_SIZE = 4096, 112 DEF_SRPT_SQ_SIZE = 4096,
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 94b68213c50d..5f6b3bcab078 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1941,6 +1941,7 @@ static struct iommu_ops arm_smmu_ops = {
1941 .attach_dev = arm_smmu_attach_dev, 1941 .attach_dev = arm_smmu_attach_dev,
1942 .map = arm_smmu_map, 1942 .map = arm_smmu_map,
1943 .unmap = arm_smmu_unmap, 1943 .unmap = arm_smmu_unmap,
1944 .map_sg = default_iommu_map_sg,
1944 .iova_to_phys = arm_smmu_iova_to_phys, 1945 .iova_to_phys = arm_smmu_iova_to_phys,
1945 .add_device = arm_smmu_add_device, 1946 .add_device = arm_smmu_add_device,
1946 .remove_device = arm_smmu_remove_device, 1947 .remove_device = arm_smmu_remove_device,
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a644d0cec2d8..10700945994e 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3222,11 +3222,6 @@ static int __init init_dmars(void)
3222 } 3222 }
3223 } 3223 }
3224 3224
3225 iommu_flush_write_buffer(iommu);
3226 iommu_set_root_entry(iommu);
3227 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3228 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3229
3230 if (!ecap_pass_through(iommu->ecap)) 3225 if (!ecap_pass_through(iommu->ecap))
3231 hw_pass_through = 0; 3226 hw_pass_through = 0;
3232#ifdef CONFIG_INTEL_IOMMU_SVM 3227#ifdef CONFIG_INTEL_IOMMU_SVM
@@ -3235,6 +3230,18 @@ static int __init init_dmars(void)
3235#endif 3230#endif
3236 } 3231 }
3237 3232
3233 /*
3234 * Now that qi is enabled on all iommus, set the root entry and flush
3235 * caches. This is required on some Intel X58 chipsets, otherwise the
3236 * flush_context function will loop forever and the boot hangs.
3237 */
3238 for_each_active_iommu(iommu, drhd) {
3239 iommu_flush_write_buffer(iommu);
3240 iommu_set_root_entry(iommu);
3241 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3242 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3243 }
3244
3238 if (iommu_pass_through) 3245 if (iommu_pass_through)
3239 iommu_identity_mapping |= IDENTMAP_ALL; 3246 iommu_identity_mapping |= IDENTMAP_ALL;
3240 3247
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index c7d6156ff536..25b4627cb57f 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -815,7 +815,7 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
 	dte_addr = virt_to_phys(rk_domain->dt);
 	for (i = 0; i < iommu->num_mmu; i++) {
 		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_addr);
-		rk_iommu_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
+		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
 		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
 	}
 
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 6bd881be24ea..5eb1f9e17a98 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -41,6 +41,7 @@
 
 #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING		(1ULL << 0)
 #define ITS_FLAGS_WORKAROUND_CAVIUM_22375	(1ULL << 1)
+#define ITS_FLAGS_WORKAROUND_CAVIUM_23144	(1ULL << 2)
 
 #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING	(1 << 0)
 
@@ -82,6 +83,7 @@ struct its_node {
 	u64			flags;
 	u32			ite_size;
 	u32			device_ids;
+	int			numa_node;
 };
 
 #define ITS_ITT_ALIGN		SZ_256
@@ -613,11 +615,23 @@ static void its_unmask_irq(struct irq_data *d)
 static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 			    bool force)
 {
-	unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
+	unsigned int cpu;
+	const struct cpumask *cpu_mask = cpu_online_mask;
 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
 	struct its_collection *target_col;
 	u32 id = its_get_event_id(d);
 
+	/* lpi cannot be routed to a redistributor that is on a foreign node */
+	if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
+		if (its_dev->its->numa_node >= 0) {
+			cpu_mask = cpumask_of_node(its_dev->its->numa_node);
+			if (!cpumask_intersects(mask_val, cpu_mask))
+				return -EINVAL;
+		}
+	}
+
+	cpu = cpumask_any_and(mask_val, cpu_mask);
+
 	if (cpu >= nr_cpu_ids)
 		return -EINVAL;
 
@@ -1101,6 +1115,16 @@ static void its_cpu_init_collection(void)
 	list_for_each_entry(its, &its_nodes, entry) {
 		u64 target;
 
+		/* avoid cross node collections and its mapping */
+		if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
+			struct device_node *cpu_node;
+
+			cpu_node = of_get_cpu_node(cpu, NULL);
+			if (its->numa_node != NUMA_NO_NODE &&
+			    its->numa_node != of_node_to_nid(cpu_node))
+				continue;
+		}
+
 		/*
 		 * We now have to bind each collection to its target
 		 * redistributor.
@@ -1351,9 +1375,14 @@ static void its_irq_domain_activate(struct irq_domain *domain,
 {
 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
 	u32 event = its_get_event_id(d);
+	const struct cpumask *cpu_mask = cpu_online_mask;
+
+	/* get the cpu_mask of local node */
+	if (its_dev->its->numa_node >= 0)
+		cpu_mask = cpumask_of_node(its_dev->its->numa_node);
 
 	/* Bind the LPI to the first possible CPU */
-	its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask);
+	its_dev->event_map.col_map[event] = cpumask_first(cpu_mask);
 
 	/* Map the GIC IRQ and event to the device */
 	its_send_mapvi(its_dev, d->hwirq, event);
@@ -1443,6 +1472,13 @@ static void __maybe_unused its_enable_quirk_cavium_22375(void *data)
 	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
 }
 
+static void __maybe_unused its_enable_quirk_cavium_23144(void *data)
+{
+	struct its_node *its = data;
+
+	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
+}
+
 static const struct gic_quirk its_quirks[] = {
 #ifdef CONFIG_CAVIUM_ERRATUM_22375
 	{
1446static const struct gic_quirk its_quirks[] = { 1482static const struct gic_quirk its_quirks[] = {
1447#ifdef CONFIG_CAVIUM_ERRATUM_22375 1483#ifdef CONFIG_CAVIUM_ERRATUM_22375
1448 { 1484 {
@@ -1452,6 +1488,14 @@ static const struct gic_quirk its_quirks[] = {
 		.init	= its_enable_quirk_cavium_22375,
 	},
 #endif
+#ifdef CONFIG_CAVIUM_ERRATUM_23144
+	{
+		.desc	= "ITS: Cavium erratum 23144",
+		.iidr	= 0xa100034c,	/* ThunderX pass 1.x */
+		.mask	= 0xffff0fff,
+		.init	= its_enable_quirk_cavium_23144,
+	},
+#endif
 	{
 	}
 };
@@ -1514,6 +1558,7 @@ static int __init its_probe(struct device_node *node,
 	its->base = its_base;
 	its->phys_base = res.start;
 	its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
+	its->numa_node = of_node_to_nid(node);
 
 	its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL);
 	if (!its->cmd_base) {
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index fb042ba9a3db..2c5ba0e704bf 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -155,7 +155,7 @@ static void gic_enable_redist(bool enable)
 
 	while (count--) {
 		val = readl_relaxed(rbase + GICR_WAKER);
-		if (enable ^ (val & GICR_WAKER_ChildrenAsleep))
+		if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
 			break;
 		cpu_relax();
 		udelay(1);
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 3b5e10aa48ab..8a4adbeb2b8c 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -746,6 +746,12 @@ static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
 		/* verify that it doesn't conflict with an IPI irq */
 		if (test_bit(spec->hwirq, ipi_resrv))
 			return -EBUSY;
+
+		hwirq = GIC_SHARED_TO_HWIRQ(spec->hwirq);
+
+		return irq_domain_set_hwirq_and_chip(d, virq, hwirq,
+						     &gic_level_irq_controller,
+						     NULL);
 	} else {
 		base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs);
 		if (base_hwirq == gic_shared_intrs) {
@@ -867,10 +873,14 @@ static int gic_dev_domain_alloc(struct irq_domain *d, unsigned int virq,
 						      &gic_level_irq_controller,
 						      NULL);
 		if (ret)
-			return ret;
+			goto error;
 	}
 
 	return 0;
+
+error:
+	irq_domain_free_irqs_parent(d, virq, nr_irqs);
+	return ret;
 }
 
 void gic_dev_domain_free(struct irq_domain *d, unsigned int virq,
diff --git a/drivers/irqchip/irq-pic32-evic.c b/drivers/irqchip/irq-pic32-evic.c
index e7155db01d55..73addb4b625b 100644
--- a/drivers/irqchip/irq-pic32-evic.c
+++ b/drivers/irqchip/irq-pic32-evic.c
@@ -91,7 +91,7 @@ static int pic32_set_type_edge(struct irq_data *data,
 	/* set polarity for external interrupts only */
 	for (i = 0; i < ARRAY_SIZE(priv->ext_irqs); i++) {
 		if (priv->ext_irqs[i] == data->hwirq) {
-			ret = pic32_set_ext_polarity(i + 1, flow_type);
+			ret = pic32_set_ext_polarity(i, flow_type);
 			if (ret)
 				return ret;
 		}
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index 3495d5d6547f..3bce44893021 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -53,11 +53,12 @@ static void led_timer_function(unsigned long data)
 
 	if (!led_cdev->blink_delay_on || !led_cdev->blink_delay_off) {
 		led_set_brightness_nosleep(led_cdev, LED_OFF);
+		led_cdev->flags &= ~LED_BLINK_SW;
 		return;
 	}
 
 	if (led_cdev->flags & LED_BLINK_ONESHOT_STOP) {
-		led_cdev->flags &= ~LED_BLINK_ONESHOT_STOP;
+		led_cdev->flags &= ~(LED_BLINK_ONESHOT_STOP | LED_BLINK_SW);
 		return;
 	}
 
@@ -151,6 +152,7 @@ static void led_set_software_blink(struct led_classdev *led_cdev,
 		return;
 	}
 
+	led_cdev->flags |= LED_BLINK_SW;
 	mod_timer(&led_cdev->blink_timer, jiffies + 1);
 }
 
156 158
@@ -219,6 +221,7 @@ void led_stop_software_blink(struct led_classdev *led_cdev)
 	del_timer_sync(&led_cdev->blink_timer);
 	led_cdev->blink_delay_on = 0;
 	led_cdev->blink_delay_off = 0;
+	led_cdev->flags &= ~LED_BLINK_SW;
 }
 EXPORT_SYMBOL_GPL(led_stop_software_blink);
 
@@ -226,10 +229,10 @@ void led_set_brightness(struct led_classdev *led_cdev,
 			  enum led_brightness brightness)
 {
 	/*
-	 * In case blinking is on delay brightness setting
+	 * If software blink is active, delay brightness setting
 	 * until the next timer tick.
 	 */
-	if (led_cdev->blink_delay_on || led_cdev->blink_delay_off) {
+	if (led_cdev->flags & LED_BLINK_SW) {
 		/*
 		 * If we need to disable soft blinking delegate this to the
 		 * work queue task to avoid problems in case we are called
diff --git a/drivers/leds/trigger/ledtrig-heartbeat.c b/drivers/leds/trigger/ledtrig-heartbeat.c
index 410c39c62dc7..c9f386213e9e 100644
--- a/drivers/leds/trigger/ledtrig-heartbeat.c
+++ b/drivers/leds/trigger/ledtrig-heartbeat.c
@@ -19,6 +19,7 @@
 #include <linux/sched.h>
 #include <linux/leds.h>
 #include <linux/reboot.h>
+#include <linux/suspend.h>
 #include "../leds.h"
 
 static int panic_heartbeats;
@@ -154,6 +155,30 @@ static struct led_trigger heartbeat_led_trigger = {
 	.deactivate = heartbeat_trig_deactivate,
 };
 
+static int heartbeat_pm_notifier(struct notifier_block *nb,
+				 unsigned long pm_event, void *unused)
+{
+	int rc;
+
+	switch (pm_event) {
+	case PM_SUSPEND_PREPARE:
+	case PM_HIBERNATION_PREPARE:
+	case PM_RESTORE_PREPARE:
+		led_trigger_unregister(&heartbeat_led_trigger);
+		break;
+	case PM_POST_SUSPEND:
+	case PM_POST_HIBERNATION:
+	case PM_POST_RESTORE:
+		rc = led_trigger_register(&heartbeat_led_trigger);
+		if (rc)
+			pr_err("could not re-register heartbeat trigger\n");
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
 static int heartbeat_reboot_notifier(struct notifier_block *nb,
 				     unsigned long code, void *unused)
 {
@@ -168,6 +193,10 @@ static int heartbeat_panic_notifier(struct notifier_block *nb,
 	return NOTIFY_DONE;
 }
 
+static struct notifier_block heartbeat_pm_nb = {
+	.notifier_call = heartbeat_pm_notifier,
+};
+
 static struct notifier_block heartbeat_reboot_nb = {
 	.notifier_call = heartbeat_reboot_notifier,
 };
@@ -184,12 +213,14 @@ static int __init heartbeat_trig_init(void)
 		atomic_notifier_chain_register(&panic_notifier_list,
 					       &heartbeat_panic_nb);
 		register_reboot_notifier(&heartbeat_reboot_nb);
+		register_pm_notifier(&heartbeat_pm_nb);
 	}
 	return rc;
 }
 
 static void __exit heartbeat_trig_exit(void)
 {
+	unregister_pm_notifier(&heartbeat_pm_nb);
 	unregister_reboot_notifier(&heartbeat_reboot_nb);
 	atomic_notifier_chain_unregister(&panic_notifier_list,
 					 &heartbeat_panic_nb);
diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c
index b73c6e7d28e4..6f2c8522e14a 100644
--- a/drivers/mcb/mcb-core.c
+++ b/drivers/mcb/mcb-core.c
@@ -61,21 +61,36 @@ static int mcb_probe(struct device *dev)
 	struct mcb_driver *mdrv = to_mcb_driver(dev->driver);
 	struct mcb_device *mdev = to_mcb_device(dev);
 	const struct mcb_device_id *found_id;
+	struct module *carrier_mod;
+	int ret;
 
 	found_id = mcb_match_id(mdrv->id_table, mdev);
 	if (!found_id)
 		return -ENODEV;
 
-	return mdrv->probe(mdev, found_id);
+	carrier_mod = mdev->dev.parent->driver->owner;
+	if (!try_module_get(carrier_mod))
+		return -EINVAL;
+
+	get_device(dev);
+	ret = mdrv->probe(mdev, found_id);
+	if (ret)
+		module_put(carrier_mod);
+
+	return ret;
 }
 
 static int mcb_remove(struct device *dev)
 {
 	struct mcb_driver *mdrv = to_mcb_driver(dev->driver);
 	struct mcb_device *mdev = to_mcb_device(dev);
+	struct module *carrier_mod;
 
 	mdrv->remove(mdev);
 
+	carrier_mod = mdev->dev.parent->driver->owner;
+	module_put(carrier_mod);
+
 	put_device(&mdev->dev);
 
 	return 0;
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index 70c28d19ea04..22cf60991df6 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -45,7 +45,7 @@
 #include <media/v4l2-ioctl.h>
 
 #include <video/omapvrfb.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "omap_voutlib.h"
 #include "omap_voutdef.h"
diff --git a/drivers/media/platform/omap/omap_voutdef.h b/drivers/media/platform/omap/omap_voutdef.h
index 9ccfe1f475a4..94b5d65afb19 100644
--- a/drivers/media/platform/omap/omap_voutdef.h
+++ b/drivers/media/platform/omap/omap_voutdef.h
@@ -11,7 +11,7 @@
 #ifndef OMAP_VOUTDEF_H
 #define OMAP_VOUTDEF_H
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include <video/omapvrfb.h>
 
 #define YUYV_BPP	2
diff --git a/drivers/media/platform/omap/omap_voutlib.c b/drivers/media/platform/omap/omap_voutlib.c
index 80b0d88f125c..58a25fdf0cce 100644
--- a/drivers/media/platform/omap/omap_voutlib.c
+++ b/drivers/media/platform/omap/omap_voutlib.c
@@ -26,7 +26,7 @@
 
 #include <linux/dma-mapping.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "omap_voutlib.h"
 
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index d7723ce772b3..c04bc6afb965 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -1274,8 +1274,6 @@ struct uvc_xu_control_mapping32 {
 static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp,
 			const struct uvc_xu_control_mapping32 __user *up)
 {
-	struct uvc_menu_info __user *umenus;
-	struct uvc_menu_info __user *kmenus;
 	compat_caddr_t p;
 
 	if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
@@ -1292,17 +1290,7 @@ static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp,
 
 	if (__get_user(p, &up->menu_info))
 		return -EFAULT;
-	umenus = compat_ptr(p);
-	if (!access_ok(VERIFY_READ, umenus, kp->menu_count * sizeof(*umenus)))
-		return -EFAULT;
-
-	kmenus = compat_alloc_user_space(kp->menu_count * sizeof(*kmenus));
-	if (kmenus == NULL)
-		return -EFAULT;
-	kp->menu_info = kmenus;
-
-	if (copy_in_user(kmenus, umenus, kp->menu_count * sizeof(*umenus)))
-		return -EFAULT;
+	kp->menu_info = compat_ptr(p);
 
 	return 0;
 }
@@ -1310,10 +1298,6 @@ static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp,
 static int uvc_v4l2_put_xu_mapping(const struct uvc_xu_control_mapping *kp,
 			struct uvc_xu_control_mapping32 __user *up)
 {
-	struct uvc_menu_info __user *umenus;
-	struct uvc_menu_info __user *kmenus = kp->menu_info;
-	compat_caddr_t p;
-
 	if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
 	    __copy_to_user(up, kp, offsetof(typeof(*up), menu_info)) ||
 	    __put_user(kp->menu_count, &up->menu_count))
@@ -1322,16 +1306,6 @@ static int uvc_v4l2_put_xu_mapping(const struct uvc_xu_control_mapping *kp,
 	if (__clear_user(up->reserved, sizeof(up->reserved)))
 		return -EFAULT;
 
-	if (kp->menu_count == 0)
-		return 0;
-
-	if (get_user(p, &up->menu_info))
-		return -EFAULT;
-	umenus = compat_ptr(p);
-
-	if (copy_in_user(umenus, kmenus, kp->menu_count * sizeof(*umenus)))
-		return -EFAULT;
-
 	return 0;
 }
 
1337 1311
@@ -1346,8 +1320,6 @@ struct uvc_xu_control_query32 {
 static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp,
 			const struct uvc_xu_control_query32 __user *up)
 {
-	u8 __user *udata;
-	u8 __user *kdata;
 	compat_caddr_t p;
 
 	if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
@@ -1361,17 +1333,7 @@ static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp,
 
 	if (__get_user(p, &up->data))
 		return -EFAULT;
-	udata = compat_ptr(p);
-	if (!access_ok(VERIFY_READ, udata, kp->size))
-		return -EFAULT;
-
-	kdata = compat_alloc_user_space(kp->size);
-	if (kdata == NULL)
-		return -EFAULT;
-	kp->data = kdata;
-
-	if (copy_in_user(kdata, udata, kp->size))
-		return -EFAULT;
+	kp->data = compat_ptr(p);
 
 	return 0;
 }
@@ -1379,26 +1341,10 @@ static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp,
 static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp,
 			struct uvc_xu_control_query32 __user *up)
 {
-	u8 __user *udata;
-	u8 __user *kdata = kp->data;
-	compat_caddr_t p;
-
 	if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
 	    __copy_to_user(up, kp, offsetof(typeof(*up), data)))
 		return -EFAULT;
 
-	if (kp->size == 0)
-		return 0;
-
-	if (get_user(p, &up->data))
-		return -EFAULT;
-	udata = compat_ptr(p);
-	if (!access_ok(VERIFY_READ, udata, kp->size))
-		return -EFAULT;
-
-	if (copy_in_user(udata, kdata, kp->size))
-		return -EFAULT;
-
 	return 0;
 }
 
1404 1350
@@ -1408,47 +1354,44 @@ static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp,
 static long uvc_v4l2_compat_ioctl32(struct file *file,
 		     unsigned int cmd, unsigned long arg)
 {
+	struct uvc_fh *handle = file->private_data;
 	union {
 		struct uvc_xu_control_mapping xmap;
 		struct uvc_xu_control_query xqry;
 	} karg;
 	void __user *up = compat_ptr(arg);
-	mm_segment_t old_fs;
 	long ret;
 
 	switch (cmd) {
 	case UVCIOC_CTRL_MAP32:
-		cmd = UVCIOC_CTRL_MAP;
 		ret = uvc_v4l2_get_xu_mapping(&karg.xmap, up);
+		if (ret)
+			return ret;
+		ret = uvc_ioctl_ctrl_map(handle->chain, &karg.xmap);
+		if (ret)
+			return ret;
+		ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up);
+		if (ret)
+			return ret;
+
 		break;
 
 	case UVCIOC_CTRL_QUERY32:
-		cmd = UVCIOC_CTRL_QUERY;
 		ret = uvc_v4l2_get_xu_query(&karg.xqry, up);
+		if (ret)
+			return ret;
+		ret = uvc_xu_ctrl_query(handle->chain, &karg.xqry);
+		if (ret)
+			return ret;
+		ret = uvc_v4l2_put_xu_query(&karg.xqry, up);
+		if (ret)
+			return ret;
 		break;
 
 	default:
 		return -ENOIOCTLCMD;
 	}
 
-	old_fs = get_fs();
-	set_fs(KERNEL_DS);
-	ret = video_ioctl2(file, cmd, (unsigned long)&karg);
-	set_fs(old_fs);
-
-	if (ret < 0)
-		return ret;
-
-	switch (cmd) {
-	case UVCIOC_CTRL_MAP:
-		ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up);
-		break;
-
-	case UVCIOC_CTRL_QUERY:
-		ret = uvc_v4l2_put_xu_query(&karg.xqry, up);
-		break;
-	}
-
 	return ret;
 }
 #endif
diff --git a/drivers/media/v4l2-core/v4l2-mc.c b/drivers/media/v4l2-core/v4l2-mc.c
index ca94bded3386..8bef4331bd51 100644
--- a/drivers/media/v4l2-core/v4l2-mc.c
+++ b/drivers/media/v4l2-core/v4l2-mc.c
@@ -1,7 +1,7 @@
 /*
  * Media Controller ancillary functions
  *
- * Copyright (c) 2016 Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+ * Copyright (c) 2016 Mauro Carvalho Chehab <mchehab@kernel.org>
  * Copyright (C) 2016 Shuah Khan <shuahkh@osg.samsung.com>
  * Copyright (C) 2006-2010 Nokia Corporation
  * Copyright (c) 2016 Intel Corporation.
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index af4884ba6b7c..15508df24e5d 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -398,7 +398,7 @@ static void gpmc_cs_bool_timings(int cs, const struct gpmc_bool_timings *p)
 	gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
 			   GPMC_CONFIG4_OEEXTRADELAY, p->oe_extra_delay);
 	gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
-			   GPMC_CONFIG4_OEEXTRADELAY, p->we_extra_delay);
+			   GPMC_CONFIG4_WEEXTRADELAY, p->we_extra_delay);
 	gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6,
 			   GPMC_CONFIG6_CYCLE2CYCLESAMECSEN,
 			   p->cycle2cyclesamecsen);
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index eed254da63a8..641c1a566687 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -730,7 +730,7 @@ static void mei_cl_wake_all(struct mei_cl *cl)
 	/* synchronized under device mutex */
 	if (waitqueue_active(&cl->wait)) {
 		cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
-		wake_up_interruptible(&cl->wait);
+		wake_up(&cl->wait);
 	}
 }
 
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index c984321d1881..5d438ad3ee32 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1276,7 +1276,7 @@ static int mmc_select_hs200(struct mmc_card *card)
 	 * switch to HS200 mode if bus width is set successfully.
 	 */
 	err = mmc_select_bus_width(card);
-	if (!err) {
+	if (err >= 0) {
 		val = EXT_CSD_TIMING_HS200 |
 		      card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
 		err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
@@ -1583,7 +1583,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
 	} else if (mmc_card_hs(card)) {
 		/* Select the desired bus width optionally */
 		err = mmc_select_bus_width(card);
-		if (!err) {
+		if (err >= 0) {
 			err = mmc_select_hs_ddr(card);
 			if (err)
 				goto free_card;
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 7fc8b7aa83f0..2ee4c21ec55e 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -970,8 +970,8 @@ static const struct sunxi_mmc_clk_delay sun9i_mmc_clk_delays[] = {
 	[SDXC_CLK_400K]		= { .output = 180, .sample = 180 },
 	[SDXC_CLK_25M]		= { .output = 180, .sample =  75 },
 	[SDXC_CLK_50M]		= { .output = 150, .sample = 120 },
-	[SDXC_CLK_50M_DDR]	= { .output =  90, .sample = 120 },
-	[SDXC_CLK_50M_DDR_8BIT]	= { .output =  90, .sample = 120 },
+	[SDXC_CLK_50M_DDR]	= { .output =  54, .sample =  36 },
+	[SDXC_CLK_50M_DDR_8BIT]	= { .output =  72, .sample =  72 },
 };
 
 static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
@@ -1129,11 +1129,6 @@ static int sunxi_mmc_probe(struct platform_device *pdev)
 			  MMC_CAP_1_8V_DDR |
 			  MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ;
 
-	/* TODO MMC DDR is not working on A80 */
-	if (of_device_is_compatible(pdev->dev.of_node,
-				    "allwinner,sun9i-a80-mmc"))
-		mmc->caps &= ~MMC_CAP_1_8V_DDR;
-
 	ret = mmc_of_parse(mmc);
 	if (ret)
 		goto error_free_dma;
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 16baeb51b2bd..ef3618299494 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -1147,11 +1147,17 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
  */
 static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
 {
-	struct kstat stat;
 	int err, minor;
+	struct path path;
+	struct kstat stat;
 
 	/* Probably this is an MTD character device node path */
-	err = vfs_stat(mtd_dev, &stat);
+	err = kern_path(mtd_dev, LOOKUP_FOLLOW, &path);
+	if (err)
+		return ERR_PTR(err);
+
+	err = vfs_getattr(&path, &stat);
+	path_put(&path);
 	if (err)
 		return ERR_PTR(err);
 
@@ -1160,6 +1166,7 @@ static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
 		return ERR_PTR(-EINVAL);
 
 	minor = MINOR(stat.rdev);
+
 	if (minor & 1)
 		/*
 		 * Just do not think the "/dev/mtdrX" devices support is need,
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 5780dd1ba79d..ebf517271d29 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -575,6 +575,7 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
 	int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0;
 	struct ubi_volume *vol = ubi->volumes[idx];
 	struct ubi_vid_hdr *vid_hdr;
+	uint32_t crc;
 
 	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
 	if (!vid_hdr)
@@ -599,14 +600,8 @@ retry:
 		goto out_put;
 	}
 
-	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
-	err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
-	if (err) {
-		up_read(&ubi->fm_eba_sem);
-		goto write_error;
-	}
+	ubi_assert(vid_hdr->vol_type == UBI_VID_DYNAMIC);
 
-	data_size = offset + len;
 	mutex_lock(&ubi->buf_mutex);
 	memset(ubi->peb_buf + offset, 0xFF, len);
 
@@ -621,6 +616,19 @@ retry:
 
 	memcpy(ubi->peb_buf + offset, buf, len);
 
+	data_size = offset + len;
+	crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
+	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+	vid_hdr->copy_flag = 1;
+	vid_hdr->data_size = cpu_to_be32(data_size);
+	vid_hdr->data_crc = cpu_to_be32(crc);
+	err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
+	if (err) {
+		mutex_unlock(&ubi->buf_mutex);
+		up_read(&ubi->fm_eba_sem);
+		goto write_error;
+	}
+
 	err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
 	if (err) {
 		mutex_unlock(&ubi->buf_mutex);
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 348dbbcbedc8..a9e2cef7c95c 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -302,6 +302,7 @@ EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
 struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
 {
 	int error, ubi_num, vol_id;
+	struct path path;
 	struct kstat stat;
 
 	dbg_gen("open volume %s, mode %d", pathname, mode);
@@ -309,7 +310,12 @@ struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
 	if (!pathname || !*pathname)
 		return ERR_PTR(-EINVAL);
 
-	error = vfs_stat(pathname, &stat);
+	error = kern_path(pathname, LOOKUP_FOLLOW, &path);
+	if (error)
+		return ERR_PTR(error);
+
+	error = vfs_getattr(&path, &stat);
+	path_put(&path);
 	if (error)
 		return ERR_PTR(error);
 
diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c
index 16419f550eff..058460bdd5a6 100644
--- a/drivers/net/ethernet/arc/emac_mdio.c
+++ b/drivers/net/ethernet/arc/emac_mdio.c
@@ -141,7 +141,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
 	priv->bus = bus;
 	bus->priv = priv;
 	bus->parent = priv->dev;
-	bus->name = "Synopsys MII Bus",
+	bus->name = "Synopsys MII Bus";
 	bus->read = &arc_mdio_read;
 	bus->write = &arc_mdio_write;
 	bus->reset = &arc_mdio_reset;
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h
index 8fc93c5f6abc..d02c4240b7df 100644
--- a/drivers/net/ethernet/atheros/alx/alx.h
+++ b/drivers/net/ethernet/atheros/alx/alx.h
@@ -96,6 +96,10 @@ struct alx_priv {
 	unsigned int rx_ringsz;
 	unsigned int rxbuf_size;
 
+	struct page *rx_page;
+	unsigned int rx_page_offset;
+	unsigned int rx_frag_size;
+
 	struct napi_struct napi;
 	struct alx_tx_queue txq;
 	struct alx_rx_queue rxq;
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 9fe8b5e310d1..c98acdc0d14f 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -70,6 +70,35 @@ static void alx_free_txbuf(struct alx_priv *alx, int entry)
 	}
 }
 
+static struct sk_buff *alx_alloc_skb(struct alx_priv *alx, gfp_t gfp)
+{
+	struct sk_buff *skb;
+	struct page *page;
+
+	if (alx->rx_frag_size > PAGE_SIZE)
+		return __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
+
+	page = alx->rx_page;
+	if (!page) {
+		alx->rx_page = page = alloc_page(gfp);
+		if (unlikely(!page))
+			return NULL;
+		alx->rx_page_offset = 0;
+	}
+
+	skb = build_skb(page_address(page) + alx->rx_page_offset,
+			alx->rx_frag_size);
+	if (likely(skb)) {
+		alx->rx_page_offset += alx->rx_frag_size;
+		if (alx->rx_page_offset >= PAGE_SIZE)
+			alx->rx_page = NULL;
+		else
+			get_page(page);
+	}
+	return skb;
+}
+
+
 static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
 {
 	struct alx_rx_queue *rxq = &alx->rxq;
@@ -86,7 +115,7 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
 	while (!cur_buf->skb && next != rxq->read_idx) {
 		struct alx_rfd *rfd = &rxq->rfd[cur];
 
-		skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
+		skb = alx_alloc_skb(alx, gfp);
 		if (!skb)
 			break;
 		dma = dma_map_single(&alx->hw.pdev->dev,
@@ -124,6 +153,7 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
 		alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
 	}
 
+
 	return count;
 }
 
@@ -592,6 +622,11 @@ static void alx_free_rings(struct alx_priv *alx)
 	kfree(alx->txq.bufs);
 	kfree(alx->rxq.bufs);
 
+	if (alx->rx_page) {
+		put_page(alx->rx_page);
+		alx->rx_page = NULL;
+	}
+
 	dma_free_coherent(&alx->hw.pdev->dev,
 			  alx->descmem.size,
 			  alx->descmem.virt,
@@ -646,6 +681,7 @@ static int alx_request_irq(struct alx_priv *alx)
 				  alx->dev->name, alx);
 		if (!err)
 			goto out;
+
 		/* fall back to legacy interrupt */
 		pci_disable_msi(alx->hw.pdev);
 	}
@@ -689,6 +725,7 @@ static int alx_init_sw(struct alx_priv *alx)
 	struct pci_dev *pdev = alx->hw.pdev;
 	struct alx_hw *hw = &alx->hw;
 	int err;
+	unsigned int head_size;
 
 	err = alx_identify_hw(alx);
 	if (err) {
@@ -704,7 +741,12 @@ static int alx_init_sw(struct alx_priv *alx)
 
 	hw->smb_timer = 400;
 	hw->mtu = alx->dev->mtu;
+
 	alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu);
+	head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) +
+		    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	alx->rx_frag_size = roundup_pow_of_two(head_size);
+
 	alx->tx_ringsz = 256;
 	alx->rx_ringsz = 512;
 	hw->imt = 200;
@@ -806,6 +848,7 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
 {
 	struct alx_priv *alx = netdev_priv(netdev);
 	int max_frame = ALX_MAX_FRAME_LEN(mtu);
+	unsigned int head_size;
 
 	if ((max_frame < ALX_MIN_FRAME_SIZE) ||
 	    (max_frame > ALX_MAX_FRAME_SIZE))
@@ -817,6 +860,9 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
 	netdev->mtu = mtu;
 	alx->hw.mtu = mtu;
 	alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
+	head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) +
+		    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	alx->rx_frag_size = roundup_pow_of_two(head_size);
 	netdev_update_features(netdev);
 	if (netif_running(netdev))
 		alx_reinit(alx);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 0a5b770cefaa..a59d55e25d5f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12895,52 +12895,71 @@ static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add)
 	return rc;
 }
 
-int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
+static int bnx2x_vlan_configure_vid_list(struct bnx2x *bp)
 {
 	struct bnx2x_vlan_entry *vlan;
 	int rc = 0;
 
-	if (!bp->vlan_cnt) {
-		DP(NETIF_MSG_IFUP, "No need to re-configure vlan filters\n");
-		return 0;
-	}
-
+	/* Configure all non-configured entries */
 	list_for_each_entry(vlan, &bp->vlan_reg, link) {
-		/* Prepare for cleanup in case of errors */
-		if (rc) {
-			vlan->hw = false;
-			continue;
-		}
-
-		if (!vlan->hw)
+		if (vlan->hw)
 			continue;
 
-		DP(NETIF_MSG_IFUP, "Re-configuring vlan 0x%04x\n", vlan->vid);
+		if (bp->vlan_cnt >= bp->vlan_credit)
+			return -ENOBUFS;
 
 		rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
 		if (rc) {
-			BNX2X_ERR("Unable to configure VLAN %d\n", vlan->vid);
-			vlan->hw = false;
-			rc = -EINVAL;
-			continue;
+			BNX2X_ERR("Unable to config VLAN %d\n", vlan->vid);
+			return rc;
 		}
+
+		DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", vlan->vid);
+		vlan->hw = true;
+		bp->vlan_cnt++;
 	}
 
-	return rc;
+	return 0;
+}
+
+static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode)
+{
+	bool need_accept_any_vlan;
+
+	need_accept_any_vlan = !!bnx2x_vlan_configure_vid_list(bp);
+
+	if (bp->accept_any_vlan != need_accept_any_vlan) {
+		bp->accept_any_vlan = need_accept_any_vlan;
+		DP(NETIF_MSG_IFUP, "Accept all VLAN %s\n",
+		   bp->accept_any_vlan ? "raised" : "cleared");
+		if (set_rx_mode) {
+			if (IS_PF(bp))
+				bnx2x_set_rx_mode_inner(bp);
+			else
+				bnx2x_vfpf_storm_rx_mode(bp);
+		}
+	}
+}
+
+int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
+{
+	struct bnx2x_vlan_entry *vlan;
+
+	/* The hw forgot all entries after reload */
+	list_for_each_entry(vlan, &bp->vlan_reg, link)
+		vlan->hw = false;
+	bp->vlan_cnt = 0;
+
+	/* Don't set rx mode here. Our caller will do it. */
+	bnx2x_vlan_configure(bp, false);
+
+	return 0;
 }
 
 static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 	struct bnx2x_vlan_entry *vlan;
-	bool hw = false;
-	int rc = 0;
-
-	if (!netif_running(bp->dev)) {
-		DP(NETIF_MSG_IFUP,
-		   "Ignoring VLAN configuration the interface is down\n");
-		return -EFAULT;
-	}
 
 	DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid);
 
@@ -12948,93 +12967,47 @@ static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
 	if (!vlan)
 		return -ENOMEM;
 
-	bp->vlan_cnt++;
-	if (bp->vlan_cnt > bp->vlan_credit && !bp->accept_any_vlan) {
-		DP(NETIF_MSG_IFUP, "Accept all VLAN raised\n");
-		bp->accept_any_vlan = true;
-		if (IS_PF(bp))
-			bnx2x_set_rx_mode_inner(bp);
-		else
-			bnx2x_vfpf_storm_rx_mode(bp);
-	} else if (bp->vlan_cnt <= bp->vlan_credit) {
-		rc = __bnx2x_vlan_configure_vid(bp, vid, true);
-		hw = true;
-	}
-
 	vlan->vid = vid;
-	vlan->hw = hw;
+	vlan->hw = false;
+	list_add_tail(&vlan->link, &bp->vlan_reg);
 
-	if (!rc) {
-		list_add(&vlan->link, &bp->vlan_reg);
-	} else {
-		bp->vlan_cnt--;
-		kfree(vlan);
-	}
-
-	DP(NETIF_MSG_IFUP, "Adding VLAN result %d\n", rc);
+	if (netif_running(dev))
+		bnx2x_vlan_configure(bp, true);
 
-	return rc;
+	return 0;
 }
 
 static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 	struct bnx2x_vlan_entry *vlan;
+	bool found = false;
 	int rc = 0;
 
-	if (!netif_running(bp->dev)) {
-		DP(NETIF_MSG_IFUP,
-		   "Ignoring VLAN configuration the interface is down\n");
-		return -EFAULT;
-	}
-
 	DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid);
 
-	if (!bp->vlan_cnt) {
-		BNX2X_ERR("Unable to kill VLAN %d\n", vid);
-		return -EINVAL;
-	}
-
 	list_for_each_entry(vlan, &bp->vlan_reg, link)
-		if (vlan->vid == vid)
+		if (vlan->vid == vid) {
+			found = true;
 			break;
+		}
 
-	if (vlan->vid != vid) {
+	if (!found) {
 		BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid);
 		return -EINVAL;
 	}
 
-	if (vlan->hw)
+	if (netif_running(dev) && vlan->hw) {
 		rc = __bnx2x_vlan_configure_vid(bp, vid, false);
+		DP(NETIF_MSG_IFUP, "HW deconfigured for VLAN %d\n", vid);
+		bp->vlan_cnt--;
+	}
 
 	list_del(&vlan->link);
 	kfree(vlan);
 
-	bp->vlan_cnt--;
-
-	if (bp->vlan_cnt <= bp->vlan_credit && bp->accept_any_vlan) {
-		/* Configure all non-configured entries */
-		list_for_each_entry(vlan, &bp->vlan_reg, link) {
-			if (vlan->hw)
-				continue;
-
-			rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
-			if (rc) {
-				BNX2X_ERR("Unable to config VLAN %d\n",
-					  vlan->vid);
-				continue;
-			}
-			DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n",
-			   vlan->vid);
-			vlan->hw = true;
-		}
-		DP(NETIF_MSG_IFUP, "Accept all VLAN Removed\n");
-		bp->accept_any_vlan = false;
-		if (IS_PF(bp))
-			bnx2x_set_rx_mode_inner(bp);
-		else
-			bnx2x_vfpf_storm_rx_mode(bp);
-	}
+	if (netif_running(dev))
+		bnx2x_vlan_configure(bp, true);
 
 	DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc);
 
@@ -13941,14 +13914,14 @@ static int bnx2x_init_one(struct pci_dev *pdev,
 		bp->doorbells = bnx2x_vf_doorbells(bp);
 		rc = bnx2x_vf_pci_alloc(bp);
 		if (rc)
-			goto init_one_exit;
+			goto init_one_freemem;
 	} else {
 		doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
 		if (doorbell_size > pci_resource_len(pdev, 2)) {
 			dev_err(&bp->pdev->dev,
 				"Cannot map doorbells, bar size too small, aborting\n");
 			rc = -ENOMEM;
-			goto init_one_exit;
+			goto init_one_freemem;
 		}
 		bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
 						doorbell_size);
@@ -13957,19 +13930,19 @@ static int bnx2x_init_one(struct pci_dev *pdev,
 		dev_err(&bp->pdev->dev,
 			"Cannot map doorbell space, aborting\n");
 		rc = -ENOMEM;
-		goto init_one_exit;
+		goto init_one_freemem;
 	}
 
 	if (IS_VF(bp)) {
 		rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
 		if (rc)
-			goto init_one_exit;
+			goto init_one_freemem;
 	}
 
 	/* Enable SRIOV if capability found in configuration space */
 	rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
 	if (rc)
-		goto init_one_exit;
+		goto init_one_freemem;
 
 	/* calc qm_cid_count */
 	bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
@@ -13988,7 +13961,7 @@ static int bnx2x_init_one(struct pci_dev *pdev,
 	rc = bnx2x_set_int_mode(bp);
 	if (rc) {
 		dev_err(&pdev->dev, "Cannot set interrupts\n");
-		goto init_one_exit;
+		goto init_one_freemem;
 	}
 	BNX2X_DEV_INFO("set interrupts successfully\n");
 
@@ -13996,7 +13969,7 @@ static int bnx2x_init_one(struct pci_dev *pdev,
 	rc = register_netdev(dev);
 	if (rc) {
 		dev_err(&pdev->dev, "Cannot register net device\n");
-		goto init_one_exit;
+		goto init_one_freemem;
 	}
 	BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);
 
@@ -14029,6 +14002,9 @@ static int bnx2x_init_one(struct pci_dev *pdev,
 
 	return 0;
 
+init_one_freemem:
+	bnx2x_free_mem_bp(bp);
+
 init_one_exit:
 	bnx2x_disable_pcie_error_reporting(bp);
 
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 72a2efff8e49..c777cde85ce4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -286,7 +286,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
 		txr->tx_prod = prod;
 
+		tx_buf->is_push = 1;
 		netdev_tx_sent_queue(txq, skb->len);
+		wmb();	/* Sync is_push and byte queue before pushing data */
 
 		push_len = (length + sizeof(*tx_push) + 7) / 8;
 		if (push_len > 16) {
@@ -298,7 +300,6 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 				   push_len);
 		}
 
-		tx_buf->is_push = 1;
 		goto tx_done;
 	}
 
@@ -1112,19 +1113,13 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
 	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
 		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
 
-	if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
-		netdev_features_t features = skb->dev->features;
+	if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
+	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
 		u16 vlan_proto = tpa_info->metadata >>
 			RX_CMP_FLAGS2_METADATA_TPID_SFT;
+		u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK;
 
-		if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
-		     vlan_proto == ETH_P_8021Q) ||
-		    ((features & NETIF_F_HW_VLAN_STAG_RX) &&
-		     vlan_proto == ETH_P_8021AD)) {
-			__vlan_hwaccel_put_tag(skb, htons(vlan_proto),
-					       tpa_info->metadata &
-					       RX_CMP_FLAGS2_METADATA_VID_MASK);
-		}
+		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
 	}
 
 	skb_checksum_none_assert(skb);
@@ -1277,19 +1272,14 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
 
 	skb->protocol = eth_type_trans(skb, dev);
 
-	if (rxcmp1->rx_cmp_flags2 &
-	    cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) {
-		netdev_features_t features = skb->dev->features;
+	if ((rxcmp1->rx_cmp_flags2 &
+	     cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
+	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
 		u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
+		u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK;
 		u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
 
-		if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
-		     vlan_proto == ETH_P_8021Q) ||
-		    ((features & NETIF_F_HW_VLAN_STAG_RX) &&
-		     vlan_proto == ETH_P_8021AD))
-			__vlan_hwaccel_put_tag(skb, htons(vlan_proto),
-					       meta_data &
-					       RX_CMP_FLAGS2_METADATA_VID_MASK);
+		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
 	}
 
 	skb_checksum_none_assert(skb);
@@ -5466,6 +5456,20 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
 
 	if (!bnxt_rfs_capable(bp))
 		features &= ~NETIF_F_NTUPLE;
+
+	/* Both CTAG and STAG VLAN accelaration on the RX side have to be
+	 * turned on or off together.
+	 */
+	if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
+	    (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
+		if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
+			features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
+				      NETIF_F_HW_VLAN_STAG_RX);
+		else
+			features |= NETIF_F_HW_VLAN_CTAG_RX |
+				    NETIF_F_HW_VLAN_STAG_RX;
+	}
+
 	return features;
 }
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index a2cdfc1261dc..50812a1d67bd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -144,6 +144,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
 	CH_PCI_ID_TABLE_FENTRY(0x5015),	/* T502-bt */
 	CH_PCI_ID_TABLE_FENTRY(0x5016),	/* T580-OCP-SO */
 	CH_PCI_ID_TABLE_FENTRY(0x5017),	/* T520-OCP-SO */
+	CH_PCI_ID_TABLE_FENTRY(0x5018),	/* T540-BT */
 	CH_PCI_ID_TABLE_FENTRY(0x5080),	/* Custom T540-cr */
 	CH_PCI_ID_TABLE_FENTRY(0x5081),	/* Custom T540-LL-cr */
 	CH_PCI_ID_TABLE_FENTRY(0x5082),	/* Custom T504-cr */
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 41b010645100..4edb98c3c6c7 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1195,7 +1195,7 @@ static int ethoc_probe(struct platform_device *pdev)
 	priv->mdio = mdiobus_alloc();
 	if (!priv->mdio) {
 		ret = -ENOMEM;
-		goto free;
+		goto free2;
 	}
 
 	priv->mdio->name = "ethoc-mdio";
@@ -1208,7 +1208,7 @@ static int ethoc_probe(struct platform_device *pdev)
 	ret = mdiobus_register(priv->mdio);
 	if (ret) {
 		dev_err(&netdev->dev, "failed to register MDIO bus\n");
-		goto free;
+		goto free2;
 	}
 
 	ret = ethoc_mdio_probe(netdev);
@@ -1241,9 +1241,10 @@ error2:
 error:
 	mdiobus_unregister(priv->mdio);
 	mdiobus_free(priv->mdio);
-free:
+free2:
 	if (priv->clk)
 		clk_disable_unprepare(priv->clk);
+free:
 	free_netdev(netdev);
 out:
 	return ret;
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 085f9125cf42..06f031715b57 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -205,8 +205,10 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
205 * re-adding ourselves to the poll list. 205 * re-adding ourselves to the poll list.
206 */ 206 */
207 207
208 if (priv->tx_skb && !tx_ctrl_ct) 208 if (priv->tx_skb && !tx_ctrl_ct) {
209 nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
209 napi_reschedule(napi); 210 napi_reschedule(napi);
211 }
210 } 212 }
211 213
212 return work_done; 214 return work_done;
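The nps_enet change closes a re-arm race: once poll has called napi_complete() and re-enabled the device interrupt, deciding to poll again requires masking the interrupt before napi_reschedule(), otherwise the ISR can fire in that window and schedule NAPI a second time. A kernel-side shape sketch of that poll loop; the demo_* helpers are assumed stand-ins for the driver's register accessors, so this is illustrative rather than a buildable driver:

    #include <linux/netdevice.h>

    struct demo_priv {
            struct napi_struct napi;
            struct sk_buff *tx_skb;
    };

    static int demo_rx(struct demo_priv *priv, int budget);
    static bool demo_tx_needs_repoll(struct demo_priv *priv);
    static void demo_int_enable(struct demo_priv *priv, bool on);

    static int demo_poll(struct napi_struct *napi, int budget)
    {
            struct demo_priv *priv = container_of(napi, struct demo_priv, napi);
            int work_done = demo_rx(priv, budget);

            if (work_done < budget) {
                    napi_complete(napi);
                    demo_int_enable(priv, true);

                    /* A TX completion may still be outstanding.  Mask
                     * the interrupt again *before* napi_reschedule(),
                     * as the fix above does, so the ISR cannot also
                     * schedule NAPI in this window.
                     */
                    if (demo_tx_needs_repoll(priv)) {
                            demo_int_enable(priv, false);
                            napi_reschedule(napi);
                    }
            }
            return work_done;
    }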
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index ca2cccc594fd..fea0f330ddbd 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1197,10 +1197,8 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
1197 fec16_to_cpu(bdp->cbd_datlen), 1197 fec16_to_cpu(bdp->cbd_datlen),
1198 DMA_TO_DEVICE); 1198 DMA_TO_DEVICE);
1199 bdp->cbd_bufaddr = cpu_to_fec32(0); 1199 bdp->cbd_bufaddr = cpu_to_fec32(0);
1200 if (!skb) { 1200 if (!skb)
1201 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 1201 goto skb_done;
1202 continue;
1203 }
1204 1202
1205 /* Check for errors. */ 1203 /* Check for errors. */
1206 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | 1204 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -1239,7 +1237,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
1239 1237
1240 /* Free the sk buffer associated with this last transmit */ 1238 /* Free the sk buffer associated with this last transmit */
1241 dev_kfree_skb_any(skb); 1239 dev_kfree_skb_any(skb);
1242 1240skb_done:
1243 /* Make sure the update to bdp and tx_skbuff are performed 1241 /* Make sure the update to bdp and tx_skbuff are performed
1244 * before dirty_tx 1242 * before dirty_tx
1245 */ 1243 */
@@ -2418,24 +2416,24 @@ fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
2418 return -EOPNOTSUPP; 2416 return -EOPNOTSUPP;
2419 2417
2420 if (ec->rx_max_coalesced_frames > 255) { 2418 if (ec->rx_max_coalesced_frames > 255) {
2421 pr_err("Rx coalesced frames exceed hardware limiation"); 2419 pr_err("Rx coalesced frames exceed hardware limitation\n");
2422 return -EINVAL; 2420 return -EINVAL;
2423 } 2421 }
2424 2422
2425 if (ec->tx_max_coalesced_frames > 255) { 2423 if (ec->tx_max_coalesced_frames > 255) {
2426 pr_err("Tx coalesced frame exceed hardware limiation"); 2424 pr_err("Tx coalesced frame exceed hardware limitation\n");
2427 return -EINVAL; 2425 return -EINVAL;
2428 } 2426 }
2429 2427
2430 cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr); 2428 cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
2431 if (cycle > 0xFFFF) { 2429 if (cycle > 0xFFFF) {
2432 pr_err("Rx coalesed usec exceeed hardware limiation"); 2430 pr_err("Rx coalesced usec exceed hardware limitation\n");
2433 return -EINVAL; 2431 return -EINVAL;
2434 } 2432 }
2435 2433
2436 cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr); 2434 cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
2437 if (cycle > 0xFFFF) { 2435 if (cycle > 0xFFFF) {
2438 pr_err("Rx coalesed usec exceeed hardware limiation"); 2436 pr_err("Rx coalesced usec exceed hardware limitation\n");
2439 return -EINVAL; 2437 return -EINVAL;
2440 } 2438 }
2441 2439
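Beyond the message spelling fixes, the fec hunks show plain range validation of ethtool coalesce parameters against what the hardware counters can hold: 8-bit frame counts and 16-bit cycle counts in this device. A standalone sketch of the same checks, with the limits taken from the code above:

    #include <errno.h>
    #include <stdio.h>
    #include <stdint.h>

    /* Validate coalesce settings against illustrative hardware
     * limits: frame counters 8 bits wide, timer counters 16 bits.
     */
    static int check_coalesce(uint32_t rx_frames, uint32_t tx_frames,
                              uint32_t rx_cycles, uint32_t tx_cycles)
    {
            if (rx_frames > 255 || tx_frames > 255) {
                    fprintf(stderr, "coalesced frames exceed hardware limitation\n");
                    return -EINVAL;
            }
            if (rx_cycles > 0xFFFF || tx_cycles > 0xFFFF) {
                    fprintf(stderr, "coalesced usecs exceed hardware limitation\n");
                    return -EINVAL;
            }
            return 0;
    }

    int main(void)
    {
            printf("%d\n", check_coalesce(64, 64, 1000, 1000));   /* 0 */
            printf("%d\n", check_coalesce(300, 64, 1000, 1000));  /* -EINVAL */
            return 0;
    }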
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 7615e0668acb..2e6785b6e8be 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2440,7 +2440,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2440 tx_queue->tx_ring_size); 2440 tx_queue->tx_ring_size);
2441 2441
2442 if (likely(!nr_frags)) { 2442 if (likely(!nr_frags)) {
2443 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); 2443 if (likely(!do_tstamp))
2444 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2444 } else { 2445 } else {
2445 u32 lstatus_start = lstatus; 2446 u32 lstatus_start = lstatus;
2446 2447
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 3d746c887873..67a648c7d3a9 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -46,7 +46,6 @@ static u32 hns_nic_get_link(struct net_device *net_dev)
46 u32 link_stat = priv->link; 46 u32 link_stat = priv->link;
47 struct hnae_handle *h; 47 struct hnae_handle *h;
48 48
49 assert(priv && priv->ae_handle);
50 h = priv->ae_handle; 49 h = priv->ae_handle;
51 50
52 if (priv->phy) { 51 if (priv->phy) {
@@ -646,8 +645,6 @@ static void hns_nic_get_drvinfo(struct net_device *net_dev,
646{ 645{
647 struct hns_nic_priv *priv = netdev_priv(net_dev); 646 struct hns_nic_priv *priv = netdev_priv(net_dev);
648 647
649 assert(priv);
650
651 strncpy(drvinfo->version, HNAE_DRIVER_VERSION, 648 strncpy(drvinfo->version, HNAE_DRIVER_VERSION,
652 sizeof(drvinfo->version)); 649 sizeof(drvinfo->version));
653 drvinfo->version[sizeof(drvinfo->version) - 1] = '\0'; 650 drvinfo->version[sizeof(drvinfo->version) - 1] = '\0';
@@ -720,8 +717,6 @@ static int hns_set_pauseparam(struct net_device *net_dev,
720 struct hnae_handle *h; 717 struct hnae_handle *h;
721 struct hnae_ae_ops *ops; 718 struct hnae_ae_ops *ops;
722 719
723 assert(priv || priv->ae_handle);
724
725 h = priv->ae_handle; 720 h = priv->ae_handle;
726 ops = h->dev->ops; 721 ops = h->dev->ops;
727 722
@@ -780,8 +775,6 @@ static int hns_set_coalesce(struct net_device *net_dev,
780 struct hnae_ae_ops *ops; 775 struct hnae_ae_ops *ops;
781 int ret; 776 int ret;
782 777
783 assert(priv || priv->ae_handle);
784
785 ops = priv->ae_handle->dev->ops; 778 ops = priv->ae_handle->dev->ops;
786 779
787 if (ec->tx_coalesce_usecs != ec->rx_coalesce_usecs) 780 if (ec->tx_coalesce_usecs != ec->rx_coalesce_usecs)
@@ -1111,8 +1104,6 @@ void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd,
1111 struct hns_nic_priv *priv = netdev_priv(net_dev); 1104 struct hns_nic_priv *priv = netdev_priv(net_dev);
1112 struct hnae_ae_ops *ops; 1105 struct hnae_ae_ops *ops;
1113 1106
1114 assert(priv || priv->ae_handle);
1115
1116 ops = priv->ae_handle->dev->ops; 1107 ops = priv->ae_handle->dev->ops;
1117 1108
1118 cmd->version = HNS_CHIP_VERSION; 1109 cmd->version = HNS_CHIP_VERSION;
@@ -1135,8 +1126,6 @@ static int hns_get_regs_len(struct net_device *net_dev)
1135 struct hns_nic_priv *priv = netdev_priv(net_dev); 1126 struct hns_nic_priv *priv = netdev_priv(net_dev);
1136 struct hnae_ae_ops *ops; 1127 struct hnae_ae_ops *ops;
1137 1128
1138 assert(priv || priv->ae_handle);
1139
1140 ops = priv->ae_handle->dev->ops; 1129 ops = priv->ae_handle->dev->ops;
1141 if (!ops->get_regs_len) { 1130 if (!ops->get_regs_len) {
1142 netdev_err(net_dev, "ops->get_regs_len is null!\n"); 1131 netdev_err(net_dev, "ops->get_regs_len is null!\n");
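The hns hunks drop assert() calls that could never do useful work: assert(priv || priv->ae_handle) short-circuits to true whenever priv is non-NULL, and dereferences a NULL priv otherwise, so its only failure mode is the very crash it was meant to guard against. A standalone demonstration of why the || form is inert (the && form is what such a check would have needed):

    #include <assert.h>
    #include <stdio.h>

    struct priv { void *ae_handle; };

    int main(void)
    {
            struct priv p = { .ae_handle = NULL };
            struct priv *priv = &p;

            /* Passes even though ae_handle is NULL: the left operand
             * is true, so the right one is never evaluated.
             */
            assert(priv || priv->ae_handle);

            /* This is presumably the intended check; it actually
             * fires for a NULL handle.  (Commented out so the demo
             * runs to completion.)
             */
            /* assert(priv && priv->ae_handle); */

            printf("'||' assert passed with a NULL handle\n");
            return 0;
    }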
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c
index 01fccec632ec..466939f8f0cf 100644
--- a/drivers/net/ethernet/marvell/mvneta_bm.c
+++ b/drivers/net/ethernet/marvell/mvneta_bm.c
@@ -189,6 +189,7 @@ struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
189 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 189 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
190 hwbm_pool->construct = mvneta_bm_construct; 190 hwbm_pool->construct = mvneta_bm_construct;
191 hwbm_pool->priv = new_pool; 191 hwbm_pool->priv = new_pool;
192 spin_lock_init(&hwbm_pool->lock);
192 193
193 /* Create new pool */ 194 /* Create new pool */
194 err = mvneta_bm_pool_create(priv, new_pool); 195 err = mvneta_bm_pool_create(priv, new_pool);
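The one-line mvneta_bm fix is an initialization-ordering rule: the pool's spinlock must be initialized before the pool is handed to anything that may take it, here the pool-create path that follows. A minimal sketch of the rule with a pthreads spinlock standing in for the kernel one:

    #include <pthread.h>
    #include <stdio.h>

    struct pool { pthread_spinlock_t lock; int count; };

    static void pool_refill(struct pool *p)
    {
            pthread_spin_lock(&p->lock);   /* undefined on an uninit lock */
            p->count++;
            pthread_spin_unlock(&p->lock);
    }

    int main(void)
    {
            struct pool p = { .count = 0 };

            /* Initialize the lock *before* publishing the pool to
             * anything that may lock it, the same ordering the
             * mvneta fix restores.
             */
            pthread_spin_init(&p.lock, PTHREAD_PROCESS_PRIVATE);
            pool_refill(&p);
            printf("count=%d\n", p.count);
            pthread_spin_destroy(&p.lock);
            return 0;
    }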
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index c984462fad2a..4763252bbf85 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -133,6 +133,8 @@ static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
133static void mtk_phy_link_adjust(struct net_device *dev) 133static void mtk_phy_link_adjust(struct net_device *dev)
134{ 134{
135 struct mtk_mac *mac = netdev_priv(dev); 135 struct mtk_mac *mac = netdev_priv(dev);
136 u16 lcl_adv = 0, rmt_adv = 0;
137 u8 flowctrl;
136 u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | 138 u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
137 MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN | 139 MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
138 MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN | 140 MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
@@ -150,11 +152,30 @@ static void mtk_phy_link_adjust(struct net_device *dev)
150 if (mac->phy_dev->link) 152 if (mac->phy_dev->link)
151 mcr |= MAC_MCR_FORCE_LINK; 153 mcr |= MAC_MCR_FORCE_LINK;
152 154
153 if (mac->phy_dev->duplex) 155 if (mac->phy_dev->duplex) {
154 mcr |= MAC_MCR_FORCE_DPX; 156 mcr |= MAC_MCR_FORCE_DPX;
155 157
156 if (mac->phy_dev->pause) 158 if (mac->phy_dev->pause)
157 mcr |= MAC_MCR_FORCE_RX_FC | MAC_MCR_FORCE_TX_FC; 159 rmt_adv = LPA_PAUSE_CAP;
160 if (mac->phy_dev->asym_pause)
161 rmt_adv |= LPA_PAUSE_ASYM;
162
163 if (mac->phy_dev->advertising & ADVERTISED_Pause)
164 lcl_adv |= ADVERTISE_PAUSE_CAP;
165 if (mac->phy_dev->advertising & ADVERTISED_Asym_Pause)
166 lcl_adv |= ADVERTISE_PAUSE_ASYM;
167
168 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
169
170 if (flowctrl & FLOW_CTRL_TX)
171 mcr |= MAC_MCR_FORCE_TX_FC;
172 if (flowctrl & FLOW_CTRL_RX)
173 mcr |= MAC_MCR_FORCE_RX_FC;
174
175 netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n",
176 flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
177 flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
178 }
158 179
159 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); 180 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
160 181
@@ -208,10 +229,16 @@ static int mtk_phy_connect(struct mtk_mac *mac)
208 u32 val, ge_mode; 229 u32 val, ge_mode;
209 230
210 np = of_parse_phandle(mac->of_node, "phy-handle", 0); 231 np = of_parse_phandle(mac->of_node, "phy-handle", 0);
232 if (!np && of_phy_is_fixed_link(mac->of_node))
233 if (!of_phy_register_fixed_link(mac->of_node))
234 np = of_node_get(mac->of_node);
211 if (!np) 235 if (!np)
212 return -ENODEV; 236 return -ENODEV;
213 237
214 switch (of_get_phy_mode(np)) { 238 switch (of_get_phy_mode(np)) {
239 case PHY_INTERFACE_MODE_RGMII_TXID:
240 case PHY_INTERFACE_MODE_RGMII_RXID:
241 case PHY_INTERFACE_MODE_RGMII_ID:
215 case PHY_INTERFACE_MODE_RGMII: 242 case PHY_INTERFACE_MODE_RGMII:
216 ge_mode = 0; 243 ge_mode = 0;
217 break; 244 break;
@@ -236,7 +263,8 @@ static int mtk_phy_connect(struct mtk_mac *mac)
236 mac->phy_dev->autoneg = AUTONEG_ENABLE; 263 mac->phy_dev->autoneg = AUTONEG_ENABLE;
237 mac->phy_dev->speed = 0; 264 mac->phy_dev->speed = 0;
238 mac->phy_dev->duplex = 0; 265 mac->phy_dev->duplex = 0;
239 mac->phy_dev->supported &= PHY_BASIC_FEATURES; 266 mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
267 SUPPORTED_Asym_Pause;
240 mac->phy_dev->advertising = mac->phy_dev->supported | 268 mac->phy_dev->advertising = mac->phy_dev->supported |
241 ADVERTISED_Autoneg; 269 ADVERTISED_Autoneg;
242 phy_start_aneg(mac->phy_dev); 270 phy_start_aneg(mac->phy_dev);
@@ -280,7 +308,7 @@ static int mtk_mdio_init(struct mtk_eth *eth)
280 return 0; 308 return 0;
281 309
282err_free_bus: 310err_free_bus:
283 kfree(eth->mii_bus); 311 mdiobus_free(eth->mii_bus);
284 312
285err_put_node: 313err_put_node:
286 of_node_put(mii_np); 314 of_node_put(mii_np);
@@ -295,7 +323,7 @@ static void mtk_mdio_cleanup(struct mtk_eth *eth)
295 323
296 mdiobus_unregister(eth->mii_bus); 324 mdiobus_unregister(eth->mii_bus);
297 of_node_put(eth->mii_bus->dev.of_node); 325 of_node_put(eth->mii_bus->dev.of_node);
298 kfree(eth->mii_bus); 326 mdiobus_free(eth->mii_bus);
299} 327}
300 328
301static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask) 329static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
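The mtk_eth_soc rework replaces "pause implies both directions" with proper IEEE 802.3 pause resolution: local and remote pause/asym-pause advertisements are folded through mii_resolve_flowctrl_fdx(), the helper from linux/mii.h, and TX and RX flow control are then forced independently. A user-space re-implementation of that resolution table, with the bit values copied from mii.h:

    #include <stdio.h>
    #include <stdint.h>

    #define ADV_PAUSE_CAP  0x0400  /* same values as linux/mii.h */
    #define ADV_PAUSE_ASYM 0x0800
    #define FLOW_CTRL_TX   0x01
    #define FLOW_CTRL_RX   0x02

    /* Mirrors mii_resolve_flowctrl_fdx(): symmetric pause wins;
     * otherwise asym-pause yields exactly one direction depending
     * on which side also advertised plain pause.
     */
    static uint8_t resolve_flowctrl_fdx(uint16_t lcladv, uint16_t rmtadv)
    {
            uint8_t cap = 0;

            if (lcladv & rmtadv & ADV_PAUSE_CAP) {
                    cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
            } else if (lcladv & rmtadv & ADV_PAUSE_ASYM) {
                    if (lcladv & ADV_PAUSE_CAP)
                            cap = FLOW_CTRL_RX;
                    else if (rmtadv & ADV_PAUSE_CAP)
                            cap = FLOW_CTRL_TX;
            }
            return cap;
    }

    int main(void)
    {
            /* both sides symmetric -> rx+tx */
            printf("%#x\n", resolve_flowctrl_fdx(ADV_PAUSE_CAP, ADV_PAUSE_CAP));
            /* local pause+asym vs remote asym-only -> rx only */
            printf("%#x\n", resolve_flowctrl_fdx(ADV_PAUSE_CAP | ADV_PAUSE_ASYM,
                                                 ADV_PAUSE_ASYM));
            return 0;
    }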
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index c761194bb323..fc95affaf76b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -362,7 +362,7 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
362 362
363 for (i = 0; i < NUM_MAIN_STATS; i++, bitmap_iterator_inc(&it)) 363 for (i = 0; i < NUM_MAIN_STATS; i++, bitmap_iterator_inc(&it))
364 if (bitmap_iterator_test(&it)) 364 if (bitmap_iterator_test(&it))
365 data[index++] = ((unsigned long *)&priv->stats)[i]; 365 data[index++] = ((unsigned long *)&dev->stats)[i];
366 366
367 for (i = 0; i < NUM_PORT_STATS; i++, bitmap_iterator_inc(&it)) 367 for (i = 0; i < NUM_PORT_STATS; i++, bitmap_iterator_inc(&it))
368 if (bitmap_iterator_test(&it)) 368 if (bitmap_iterator_test(&it))
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 92e0624f4cf0..19ceced6736c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1296,15 +1296,16 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
1296} 1296}
1297 1297
1298 1298
1299static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev) 1299static struct rtnl_link_stats64 *
1300mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1300{ 1301{
1301 struct mlx4_en_priv *priv = netdev_priv(dev); 1302 struct mlx4_en_priv *priv = netdev_priv(dev);
1302 1303
1303 spin_lock_bh(&priv->stats_lock); 1304 spin_lock_bh(&priv->stats_lock);
1304 memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats)); 1305 netdev_stats_to_stats64(stats, &dev->stats);
1305 spin_unlock_bh(&priv->stats_lock); 1306 spin_unlock_bh(&priv->stats_lock);
1306 1307
1307 return &priv->ret_stats; 1308 return stats;
1308} 1309}
1309 1310
1310static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) 1311static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
@@ -1876,7 +1877,6 @@ static void mlx4_en_clear_stats(struct net_device *dev)
1876 if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) 1877 if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
1877 en_dbg(HW, priv, "Failed dumping statistics\n"); 1878 en_dbg(HW, priv, "Failed dumping statistics\n");
1878 1879
1879 memset(&priv->stats, 0, sizeof(priv->stats));
1880 memset(&priv->pstats, 0, sizeof(priv->pstats)); 1880 memset(&priv->pstats, 0, sizeof(priv->pstats));
1881 memset(&priv->pkstats, 0, sizeof(priv->pkstats)); 1881 memset(&priv->pkstats, 0, sizeof(priv->pkstats));
1882 memset(&priv->port_stats, 0, sizeof(priv->port_stats)); 1882 memset(&priv->port_stats, 0, sizeof(priv->port_stats));
@@ -1892,6 +1892,11 @@ static void mlx4_en_clear_stats(struct net_device *dev)
1892 priv->tx_ring[i]->bytes = 0; 1892 priv->tx_ring[i]->bytes = 0;
1893 priv->tx_ring[i]->packets = 0; 1893 priv->tx_ring[i]->packets = 0;
1894 priv->tx_ring[i]->tx_csum = 0; 1894 priv->tx_ring[i]->tx_csum = 0;
1895 priv->tx_ring[i]->tx_dropped = 0;
1896 priv->tx_ring[i]->queue_stopped = 0;
1897 priv->tx_ring[i]->wake_queue = 0;
1898 priv->tx_ring[i]->tso_packets = 0;
1899 priv->tx_ring[i]->xmit_more = 0;
1895 } 1900 }
1896 for (i = 0; i < priv->rx_ring_num; i++) { 1901 for (i = 0; i < priv->rx_ring_num; i++) {
1897 priv->rx_ring[i]->bytes = 0; 1902 priv->rx_ring[i]->bytes = 0;
@@ -2482,7 +2487,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
2482 .ndo_stop = mlx4_en_close, 2487 .ndo_stop = mlx4_en_close,
2483 .ndo_start_xmit = mlx4_en_xmit, 2488 .ndo_start_xmit = mlx4_en_xmit,
2484 .ndo_select_queue = mlx4_en_select_queue, 2489 .ndo_select_queue = mlx4_en_select_queue,
2485 .ndo_get_stats = mlx4_en_get_stats, 2490 .ndo_get_stats64 = mlx4_en_get_stats64,
2486 .ndo_set_rx_mode = mlx4_en_set_rx_mode, 2491 .ndo_set_rx_mode = mlx4_en_set_rx_mode,
2487 .ndo_set_mac_address = mlx4_en_set_mac, 2492 .ndo_set_mac_address = mlx4_en_set_mac,
2488 .ndo_validate_addr = eth_validate_addr, 2493 .ndo_validate_addr = eth_validate_addr,
@@ -2514,7 +2519,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
2514 .ndo_stop = mlx4_en_close, 2519 .ndo_stop = mlx4_en_close,
2515 .ndo_start_xmit = mlx4_en_xmit, 2520 .ndo_start_xmit = mlx4_en_xmit,
2516 .ndo_select_queue = mlx4_en_select_queue, 2521 .ndo_select_queue = mlx4_en_select_queue,
2517 .ndo_get_stats = mlx4_en_get_stats, 2522 .ndo_get_stats64 = mlx4_en_get_stats64,
2518 .ndo_set_rx_mode = mlx4_en_set_rx_mode, 2523 .ndo_set_rx_mode = mlx4_en_set_rx_mode,
2519 .ndo_set_mac_address = mlx4_en_set_mac, 2524 .ndo_set_mac_address = mlx4_en_set_mac,
2520 .ndo_validate_addr = eth_validate_addr, 2525 .ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 20b6c2e678b8..5aa8b751f417 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -152,8 +152,9 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
152 struct mlx4_counter tmp_counter_stats; 152 struct mlx4_counter tmp_counter_stats;
153 struct mlx4_en_stat_out_mbox *mlx4_en_stats; 153 struct mlx4_en_stat_out_mbox *mlx4_en_stats;
154 struct mlx4_en_stat_out_flow_control_mbox *flowstats; 154 struct mlx4_en_stat_out_flow_control_mbox *flowstats;
155 struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]); 155 struct net_device *dev = mdev->pndev[port];
156 struct net_device_stats *stats = &priv->stats; 156 struct mlx4_en_priv *priv = netdev_priv(dev);
157 struct net_device_stats *stats = &dev->stats;
157 struct mlx4_cmd_mailbox *mailbox; 158 struct mlx4_cmd_mailbox *mailbox;
158 u64 in_mod = reset << 8 | port; 159 u64 in_mod = reset << 8 | port;
159 int err; 160 int err;
@@ -188,6 +189,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
188 } 189 }
189 stats->tx_packets = 0; 190 stats->tx_packets = 0;
190 stats->tx_bytes = 0; 191 stats->tx_bytes = 0;
192 stats->tx_dropped = 0;
191 priv->port_stats.tx_chksum_offload = 0; 193 priv->port_stats.tx_chksum_offload = 0;
192 priv->port_stats.queue_stopped = 0; 194 priv->port_stats.queue_stopped = 0;
193 priv->port_stats.wake_queue = 0; 195 priv->port_stats.wake_queue = 0;
@@ -199,6 +201,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
199 201
200 stats->tx_packets += ring->packets; 202 stats->tx_packets += ring->packets;
201 stats->tx_bytes += ring->bytes; 203 stats->tx_bytes += ring->bytes;
204 stats->tx_dropped += ring->tx_dropped;
202 priv->port_stats.tx_chksum_offload += ring->tx_csum; 205 priv->port_stats.tx_chksum_offload += ring->tx_csum;
203 priv->port_stats.queue_stopped += ring->queue_stopped; 206 priv->port_stats.queue_stopped += ring->queue_stopped;
204 priv->port_stats.wake_queue += ring->wake_queue; 207 priv->port_stats.wake_queue += ring->wake_queue;
@@ -237,21 +240,12 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
237 stats->multicast = en_stats_adder(&mlx4_en_stats->MCAST_prio_0, 240 stats->multicast = en_stats_adder(&mlx4_en_stats->MCAST_prio_0,
238 &mlx4_en_stats->MCAST_prio_1, 241 &mlx4_en_stats->MCAST_prio_1,
239 NUM_PRIORITIES); 242 NUM_PRIORITIES);
240 stats->collisions = 0;
241 stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) + 243 stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) +
242 sw_rx_dropped; 244 sw_rx_dropped;
243 stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength); 245 stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
244 stats->rx_over_errors = 0;
245 stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC); 246 stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
246 stats->rx_frame_errors = 0;
247 stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); 247 stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
248 stats->rx_missed_errors = 0; 248 stats->tx_dropped += be32_to_cpu(mlx4_en_stats->TDROP);
249 stats->tx_aborted_errors = 0;
250 stats->tx_carrier_errors = 0;
251 stats->tx_fifo_errors = 0;
252 stats->tx_heartbeat_errors = 0;
253 stats->tx_window_errors = 0;
254 stats->tx_dropped = be32_to_cpu(mlx4_en_stats->TDROP);
255 249
256 /* RX stats */ 250 /* RX stats */
257 priv->pkstats.rx_multicast_packets = stats->multicast; 251 priv->pkstats.rx_multicast_packets = stats->multicast;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index f6e61570cb2c..76aa4d27183c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -726,12 +726,12 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
726 bool inline_ok; 726 bool inline_ok;
727 u32 ring_cons; 727 u32 ring_cons;
728 728
729 if (!priv->port_up)
730 goto tx_drop;
731
732 tx_ind = skb_get_queue_mapping(skb); 729 tx_ind = skb_get_queue_mapping(skb);
733 ring = priv->tx_ring[tx_ind]; 730 ring = priv->tx_ring[tx_ind];
734 731
732 if (!priv->port_up)
733 goto tx_drop;
734
735 /* fetch ring->cons far ahead before needing it to avoid stall */ 735 /* fetch ring->cons far ahead before needing it to avoid stall */
736 ring_cons = ACCESS_ONCE(ring->cons); 736 ring_cons = ACCESS_ONCE(ring->cons);
737 737
@@ -1030,7 +1030,7 @@ tx_drop_unmap:
1030 1030
1031tx_drop: 1031tx_drop:
1032 dev_kfree_skb_any(skb); 1032 dev_kfree_skb_any(skb);
1033 priv->stats.tx_dropped++; 1033 ring->tx_dropped++;
1034 return NETDEV_TX_OK; 1034 return NETDEV_TX_OK;
1035} 1035}
1036 1036
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index cc84e09f324a..467d47ed2c39 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -270,6 +270,7 @@ struct mlx4_en_tx_ring {
270 unsigned long tx_csum; 270 unsigned long tx_csum;
271 unsigned long tso_packets; 271 unsigned long tso_packets;
272 unsigned long xmit_more; 272 unsigned long xmit_more;
273 unsigned int tx_dropped;
273 struct mlx4_bf bf; 274 struct mlx4_bf bf;
274 unsigned long queue_stopped; 275 unsigned long queue_stopped;
275 276
@@ -482,8 +483,6 @@ struct mlx4_en_priv {
482 struct mlx4_en_port_profile *prof; 483 struct mlx4_en_port_profile *prof;
483 struct net_device *dev; 484 struct net_device *dev;
484 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; 485 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
485 struct net_device_stats stats;
486 struct net_device_stats ret_stats;
487 struct mlx4_en_port_state port_state; 486 struct mlx4_en_port_state port_state;
488 spinlock_t stats_lock; 487 spinlock_t stats_lock;
489 struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES]; 488 struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES];
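The mlx4_en series retires the driver-private net_device_stats copies in favor of dev->stats plus per-ring counters: each TX ring counts its own tx_dropped lock-free on the hot path, and .ndo_get_stats64 folds the rings together (the real driver does so under stats_lock; locking is elided here). A standalone sketch of the aggregation shape, with illustrative struct names:

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    #define NUM_TX_RINGS 4

    /* Per-ring counters: each ring updates only its own slot, so
     * the transmit path needs no shared lock.
     */
    struct tx_ring { uint64_t packets, bytes, dropped; };

    struct stats64 { uint64_t tx_packets, tx_bytes, tx_dropped; };

    static void get_stats64(const struct tx_ring rings[], struct stats64 *s)
    {
            int i;

            s->tx_packets = s->tx_bytes = s->tx_dropped = 0;
            for (i = 0; i < NUM_TX_RINGS; i++) {
                    s->tx_packets += rings[i].packets;
                    s->tx_bytes   += rings[i].bytes;
                    s->tx_dropped += rings[i].dropped;
            }
    }

    int main(void)
    {
            struct tx_ring rings[NUM_TX_RINGS] = {
                    { 10, 1000, 1 }, { 20, 2000, 0 }, { 5, 500, 2 }, { 0, 0, 0 },
            };
            struct stats64 s;

            get_stats64(rings, &s);
            printf("pkts=%" PRIu64 " bytes=%" PRIu64 " dropped=%" PRIu64 "\n",
                   s.tx_packets, s.tx_bytes, s.tx_dropped);
            return 0;
    }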
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index b3cc3ab63799..6fc156a3918d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -205,7 +205,9 @@ int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node)
205 goto free_uar; 205 goto free_uar;
206 } 206 }
207 207
208 uar->bf_map = io_mapping_map_wc(priv->bf_mapping, uar->index << PAGE_SHIFT); 208 uar->bf_map = io_mapping_map_wc(priv->bf_mapping,
209 uar->index << PAGE_SHIFT,
210 PAGE_SIZE);
209 if (!uar->bf_map) { 211 if (!uar->bf_map) {
210 err = -ENOMEM; 212 err = -ENOMEM;
211 goto unamp_uar; 213 goto unamp_uar;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index fd4392999eee..f5c8d5db25a8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3192,10 +3192,7 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
3192 flush_workqueue(priv->wq); 3192 flush_workqueue(priv->wq);
3193 if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) { 3193 if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
3194 netif_device_detach(netdev); 3194 netif_device_detach(netdev);
3195 mutex_lock(&priv->state_lock); 3195 mlx5e_close(netdev);
3196 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
3197 mlx5e_close_locked(netdev);
3198 mutex_unlock(&priv->state_lock);
3199 } else { 3196 } else {
3200 unregister_netdev(netdev); 3197 unregister_netdev(netdev);
3201 } 3198 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 229ab16fb8d3..b000ddc29553 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -317,7 +317,8 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
317 while ((sq->pc & wq->sz_m1) > sq->edge) 317 while ((sq->pc & wq->sz_m1) > sq->edge)
318 mlx5e_send_nop(sq, false); 318 mlx5e_send_nop(sq, false);
319 319
320 sq->bf_budget = bf ? sq->bf_budget - 1 : 0; 320 if (bf)
321 sq->bf_budget--;
321 322
322 sq->stats.packets++; 323 sq->stats.packets++;
323 sq->stats.bytes += num_bytes; 324 sq->stats.bytes += num_bytes;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index b84a6918a700..aebbd6ccb9fe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -383,7 +383,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
383 match_v, 383 match_v,
384 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 384 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
385 0, &dest); 385 0, &dest);
386 if (IS_ERR_OR_NULL(flow_rule)) { 386 if (IS_ERR(flow_rule)) {
387 pr_warn( 387 pr_warn(
388 "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n", 388 "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
389 dmac_v, dmac_c, vport, PTR_ERR(flow_rule)); 389 dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
@@ -457,7 +457,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
457 457
458 table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); 458 table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
459 fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0); 459 fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0);
460 if (IS_ERR_OR_NULL(fdb)) { 460 if (IS_ERR(fdb)) {
461 err = PTR_ERR(fdb); 461 err = PTR_ERR(fdb);
462 esw_warn(dev, "Failed to create FDB Table err %d\n", err); 462 esw_warn(dev, "Failed to create FDB Table err %d\n", err);
463 goto out; 463 goto out;
@@ -474,7 +474,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
474 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3); 474 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
475 eth_broadcast_addr(dmac); 475 eth_broadcast_addr(dmac);
476 g = mlx5_create_flow_group(fdb, flow_group_in); 476 g = mlx5_create_flow_group(fdb, flow_group_in);
477 if (IS_ERR_OR_NULL(g)) { 477 if (IS_ERR(g)) {
478 err = PTR_ERR(g); 478 err = PTR_ERR(g);
479 esw_warn(dev, "Failed to create flow group err(%d)\n", err); 479 esw_warn(dev, "Failed to create flow group err(%d)\n", err);
480 goto out; 480 goto out;
@@ -489,7 +489,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
489 eth_zero_addr(dmac); 489 eth_zero_addr(dmac);
490 dmac[0] = 0x01; 490 dmac[0] = 0x01;
491 g = mlx5_create_flow_group(fdb, flow_group_in); 491 g = mlx5_create_flow_group(fdb, flow_group_in);
492 if (IS_ERR_OR_NULL(g)) { 492 if (IS_ERR(g)) {
493 err = PTR_ERR(g); 493 err = PTR_ERR(g);
494 esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err); 494 esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
495 goto out; 495 goto out;
@@ -506,7 +506,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
506 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1); 506 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
507 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1); 507 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
508 g = mlx5_create_flow_group(fdb, flow_group_in); 508 g = mlx5_create_flow_group(fdb, flow_group_in);
509 if (IS_ERR_OR_NULL(g)) { 509 if (IS_ERR(g)) {
510 err = PTR_ERR(g); 510 err = PTR_ERR(g);
511 esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err); 511 esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
512 goto out; 512 goto out;
@@ -529,7 +529,7 @@ out:
529 } 529 }
530 } 530 }
531 531
532 kfree(flow_group_in); 532 kvfree(flow_group_in);
533 return err; 533 return err;
534} 534}
535 535
@@ -651,6 +651,7 @@ static void update_allmulti_vports(struct mlx5_eswitch *esw,
651 esw_fdb_set_vport_rule(esw, 651 esw_fdb_set_vport_rule(esw,
652 mac, 652 mac,
653 vport_idx); 653 vport_idx);
654 iter_vaddr->mc_promisc = true;
654 break; 655 break;
655 case MLX5_ACTION_DEL: 656 case MLX5_ACTION_DEL:
656 if (!iter_vaddr) 657 if (!iter_vaddr)
@@ -1060,7 +1061,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
1060 return; 1061 return;
1061 1062
1062 acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); 1063 acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
1063 if (IS_ERR_OR_NULL(acl)) { 1064 if (IS_ERR(acl)) {
1064 err = PTR_ERR(acl); 1065 err = PTR_ERR(acl);
1065 esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n", 1066 esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
1066 vport->vport, err); 1067 vport->vport, err);
@@ -1075,7 +1076,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
1075 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); 1076 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
1076 1077
1077 vlan_grp = mlx5_create_flow_group(acl, flow_group_in); 1078 vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
1078 if (IS_ERR_OR_NULL(vlan_grp)) { 1079 if (IS_ERR(vlan_grp)) {
1079 err = PTR_ERR(vlan_grp); 1080 err = PTR_ERR(vlan_grp);
1080 esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n", 1081 esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
1081 vport->vport, err); 1082 vport->vport, err);
@@ -1086,7 +1087,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
1086 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1); 1087 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
1087 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); 1088 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
1088 drop_grp = mlx5_create_flow_group(acl, flow_group_in); 1089 drop_grp = mlx5_create_flow_group(acl, flow_group_in);
1089 if (IS_ERR_OR_NULL(drop_grp)) { 1090 if (IS_ERR(drop_grp)) {
1090 err = PTR_ERR(drop_grp); 1091 err = PTR_ERR(drop_grp);
1091 esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n", 1092 esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
1092 vport->vport, err); 1093 vport->vport, err);
@@ -1097,7 +1098,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
1097 vport->egress.drop_grp = drop_grp; 1098 vport->egress.drop_grp = drop_grp;
1098 vport->egress.allowed_vlans_grp = vlan_grp; 1099 vport->egress.allowed_vlans_grp = vlan_grp;
1099out: 1100out:
1100 kfree(flow_group_in); 1101 kvfree(flow_group_in);
1101 if (err && !IS_ERR_OR_NULL(vlan_grp)) 1102 if (err && !IS_ERR_OR_NULL(vlan_grp))
1102 mlx5_destroy_flow_group(vlan_grp); 1103 mlx5_destroy_flow_group(vlan_grp);
1103 if (err && !IS_ERR_OR_NULL(acl)) 1104 if (err && !IS_ERR_OR_NULL(acl))
@@ -1174,7 +1175,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
1174 return; 1175 return;
1175 1176
1176 acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); 1177 acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
1177 if (IS_ERR_OR_NULL(acl)) { 1178 if (IS_ERR(acl)) {
1178 err = PTR_ERR(acl); 1179 err = PTR_ERR(acl);
1179 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n", 1180 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
1180 vport->vport, err); 1181 vport->vport, err);
@@ -1192,7 +1193,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
1192 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); 1193 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
1193 1194
1194 g = mlx5_create_flow_group(acl, flow_group_in); 1195 g = mlx5_create_flow_group(acl, flow_group_in);
1195 if (IS_ERR_OR_NULL(g)) { 1196 if (IS_ERR(g)) {
1196 err = PTR_ERR(g); 1197 err = PTR_ERR(g);
1197 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n", 1198 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
1198 vport->vport, err); 1199 vport->vport, err);
@@ -1207,7 +1208,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
1207 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); 1208 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
1208 1209
1209 g = mlx5_create_flow_group(acl, flow_group_in); 1210 g = mlx5_create_flow_group(acl, flow_group_in);
1210 if (IS_ERR_OR_NULL(g)) { 1211 if (IS_ERR(g)) {
1211 err = PTR_ERR(g); 1212 err = PTR_ERR(g);
1212 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n", 1213 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
1213 vport->vport, err); 1214 vport->vport, err);
@@ -1223,7 +1224,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
1223 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2); 1224 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
1224 1225
1225 g = mlx5_create_flow_group(acl, flow_group_in); 1226 g = mlx5_create_flow_group(acl, flow_group_in);
1226 if (IS_ERR_OR_NULL(g)) { 1227 if (IS_ERR(g)) {
1227 err = PTR_ERR(g); 1228 err = PTR_ERR(g);
1228 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n", 1229 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
1229 vport->vport, err); 1230 vport->vport, err);
@@ -1236,7 +1237,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
1236 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3); 1237 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
1237 1238
1238 g = mlx5_create_flow_group(acl, flow_group_in); 1239 g = mlx5_create_flow_group(acl, flow_group_in);
1239 if (IS_ERR_OR_NULL(g)) { 1240 if (IS_ERR(g)) {
1240 err = PTR_ERR(g); 1241 err = PTR_ERR(g);
1241 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n", 1242 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
1242 vport->vport, err); 1243 vport->vport, err);
@@ -1259,7 +1260,7 @@ out:
1259 mlx5_destroy_flow_table(vport->ingress.acl); 1260 mlx5_destroy_flow_table(vport->ingress.acl);
1260 } 1261 }
1261 1262
1262 kfree(flow_group_in); 1263 kvfree(flow_group_in);
1263} 1264}
1264 1265
1265static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw, 1266static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
@@ -1363,7 +1364,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
1363 match_v, 1364 match_v,
1364 MLX5_FLOW_CONTEXT_ACTION_ALLOW, 1365 MLX5_FLOW_CONTEXT_ACTION_ALLOW,
1365 0, NULL); 1366 0, NULL);
1366 if (IS_ERR_OR_NULL(vport->ingress.allow_rule)) { 1367 if (IS_ERR(vport->ingress.allow_rule)) {
1367 err = PTR_ERR(vport->ingress.allow_rule); 1368 err = PTR_ERR(vport->ingress.allow_rule);
1368 pr_warn("vport[%d] configure ingress allow rule, err(%d)\n", 1369 pr_warn("vport[%d] configure ingress allow rule, err(%d)\n",
1369 vport->vport, err); 1370 vport->vport, err);
@@ -1380,7 +1381,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
1380 match_v, 1381 match_v,
1381 MLX5_FLOW_CONTEXT_ACTION_DROP, 1382 MLX5_FLOW_CONTEXT_ACTION_DROP,
1382 0, NULL); 1383 0, NULL);
1383 if (IS_ERR_OR_NULL(vport->ingress.drop_rule)) { 1384 if (IS_ERR(vport->ingress.drop_rule)) {
1384 err = PTR_ERR(vport->ingress.drop_rule); 1385 err = PTR_ERR(vport->ingress.drop_rule);
1385 pr_warn("vport[%d] configure ingress drop rule, err(%d)\n", 1386 pr_warn("vport[%d] configure ingress drop rule, err(%d)\n",
1386 vport->vport, err); 1387 vport->vport, err);
@@ -1439,7 +1440,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
1439 match_v, 1440 match_v,
1440 MLX5_FLOW_CONTEXT_ACTION_ALLOW, 1441 MLX5_FLOW_CONTEXT_ACTION_ALLOW,
1441 0, NULL); 1442 0, NULL);
1442 if (IS_ERR_OR_NULL(vport->egress.allowed_vlan)) { 1443 if (IS_ERR(vport->egress.allowed_vlan)) {
1443 err = PTR_ERR(vport->egress.allowed_vlan); 1444 err = PTR_ERR(vport->egress.allowed_vlan);
1444 pr_warn("vport[%d] configure egress allowed vlan rule failed, err(%d)\n", 1445 pr_warn("vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
1445 vport->vport, err); 1446 vport->vport, err);
@@ -1457,7 +1458,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
1457 match_v, 1458 match_v,
1458 MLX5_FLOW_CONTEXT_ACTION_DROP, 1459 MLX5_FLOW_CONTEXT_ACTION_DROP,
1459 0, NULL); 1460 0, NULL);
1460 if (IS_ERR_OR_NULL(vport->egress.drop_rule)) { 1461 if (IS_ERR(vport->egress.drop_rule)) {
1461 err = PTR_ERR(vport->egress.drop_rule); 1462 err = PTR_ERR(vport->egress.drop_rule);
1462 pr_warn("vport[%d] configure egress drop rule failed, err(%d)\n", 1463 pr_warn("vport[%d] configure egress drop rule failed, err(%d)\n",
1463 vport->vport, err); 1464 vport->vport, err);
@@ -1491,14 +1492,11 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
1491 1492
1492 /* Sync with current vport context */ 1493 /* Sync with current vport context */
1493 vport->enabled_events = enable_events; 1494 vport->enabled_events = enable_events;
1494 esw_vport_change_handle_locked(vport);
1495
1496 vport->enabled = true; 1495 vport->enabled = true;
1497 1496
1498 /* only PF is trusted by default */ 1497 /* only PF is trusted by default */
1499 vport->trusted = (vport_num) ? false : true; 1498 vport->trusted = (vport_num) ? false : true;
1500 1499 esw_vport_change_handle_locked(vport);
1501 arm_vport_context_events_cmd(esw->dev, vport_num, enable_events);
1502 1500
1503 esw->enabled_vports++; 1501 esw->enabled_vports++;
1504 esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num); 1502 esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
@@ -1728,11 +1726,24 @@ void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
1728 (esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev)) 1726 (esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev))
1729#define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports) 1727#define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)
1730 1728
1729static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
1730{
1731 ((u8 *)node_guid)[7] = mac[0];
1732 ((u8 *)node_guid)[6] = mac[1];
1733 ((u8 *)node_guid)[5] = mac[2];
1734 ((u8 *)node_guid)[4] = 0xff;
1735 ((u8 *)node_guid)[3] = 0xfe;
1736 ((u8 *)node_guid)[2] = mac[3];
1737 ((u8 *)node_guid)[1] = mac[4];
1738 ((u8 *)node_guid)[0] = mac[5];
1739}
1740
1731int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, 1741int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
1732 int vport, u8 mac[ETH_ALEN]) 1742 int vport, u8 mac[ETH_ALEN])
1733{ 1743{
1734 int err = 0;
1735 struct mlx5_vport *evport; 1744 struct mlx5_vport *evport;
1745 u64 node_guid;
1746 int err = 0;
1736 1747
1737 if (!ESW_ALLOWED(esw)) 1748 if (!ESW_ALLOWED(esw))
1738 return -EPERM; 1749 return -EPERM;
@@ -1756,11 +1767,17 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
1756 return err; 1767 return err;
1757 } 1768 }
1758 1769
1770 node_guid_gen_from_mac(&node_guid, mac);
1771 err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid);
1772 if (err)
1773 mlx5_core_warn(esw->dev,
1774 "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
1775 vport, err);
1776
1759 mutex_lock(&esw->state_lock); 1777 mutex_lock(&esw->state_lock);
1760 if (evport->enabled) 1778 if (evport->enabled)
1761 err = esw_vport_ingress_config(esw, evport); 1779 err = esw_vport_ingress_config(esw, evport);
1762 mutex_unlock(&esw->state_lock); 1780 mutex_unlock(&esw->state_lock);
1763
1764 return err; 1781 return err;
1765} 1782}
1766 1783
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 8b5f0b2c0d5c..e912a3d2505e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1292,8 +1292,8 @@ static int update_root_ft_destroy(struct mlx5_flow_table *ft)
1292 ft->id); 1292 ft->id);
1293 return err; 1293 return err;
1294 } 1294 }
1295 root->root_ft = new_root_ft;
1296 } 1295 }
1296 root->root_ft = new_root_ft;
1297 return 0; 1297 return 0;
1298} 1298}
1299 1299
@@ -1767,6 +1767,9 @@ static void cleanup_root_ns(struct mlx5_core_dev *dev)
1767 1767
1768void mlx5_cleanup_fs(struct mlx5_core_dev *dev) 1768void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
1769{ 1769{
1770 if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1771 return;
1772
1770 cleanup_root_ns(dev); 1773 cleanup_root_ns(dev);
1771 cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns); 1774 cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns);
1772 cleanup_single_prio_root_ns(dev, dev->priv.esw_egress_root_ns); 1775 cleanup_single_prio_root_ns(dev, dev->priv.esw_egress_root_ns);
@@ -1828,29 +1831,36 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
1828{ 1831{
1829 int err = 0; 1832 int err = 0;
1830 1833
1834 if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1835 return 0;
1836
1831 err = mlx5_init_fc_stats(dev); 1837 err = mlx5_init_fc_stats(dev);
1832 if (err) 1838 if (err)
1833 return err; 1839 return err;
1834 1840
1835 if (MLX5_CAP_GEN(dev, nic_flow_table)) { 1841 if (MLX5_CAP_GEN(dev, nic_flow_table) &&
1842 MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
1836 err = init_root_ns(dev); 1843 err = init_root_ns(dev);
1837 if (err) 1844 if (err)
1838 goto err; 1845 goto err;
1839 } 1846 }
1847
1840 if (MLX5_CAP_GEN(dev, eswitch_flow_table)) { 1848 if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
1841 err = init_fdb_root_ns(dev); 1849 if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
1842 if (err) 1850 err = init_fdb_root_ns(dev);
1843 goto err; 1851 if (err)
1844 } 1852 goto err;
1845 if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) { 1853 }
1846 err = init_egress_acl_root_ns(dev); 1854 if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
1847 if (err) 1855 err = init_egress_acl_root_ns(dev);
1848 goto err; 1856 if (err)
1849 } 1857 goto err;
1850 if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) { 1858 }
1851 err = init_ingress_acl_root_ns(dev); 1859 if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
1852 if (err) 1860 err = init_ingress_acl_root_ns(dev);
1853 goto err; 1861 if (err)
1862 goto err;
1863 }
1854 } 1864 }
1855 1865
1856 return 0; 1866 return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index b720a274220d..b82d65802d96 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -418,7 +418,7 @@ int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
418 if (out.hdr.status) 418 if (out.hdr.status)
419 err = mlx5_cmd_status_to_err(&out.hdr); 419 err = mlx5_cmd_status_to_err(&out.hdr);
420 else 420 else
421 *xrcdn = be32_to_cpu(out.xrcdn); 421 *xrcdn = be32_to_cpu(out.xrcdn) & 0xffffff;
422 422
423 return err; 423 return err;
424} 424}
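The qp.c one-liner is defensive field extraction: the XRC domain number is a 24-bit field carried in a 32-bit big-endian word, so the reserved top byte is masked off after the byte swap instead of being trusted to be zero. In miniature:

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>  /* ntohl()/htonl(), standing in for be32_to_cpu() */

    int main(void)
    {
            /* Reply word with the reserved high byte set to garbage
             * and the 24-bit domain number 0x123456 in the low bytes.
             */
            uint32_t wire = htonl(0xab123456);
            uint32_t xrcdn = ntohl(wire) & 0xffffff;

            printf("%#x\n", xrcdn);  /* 0x123456, garbage byte dropped */
            return 0;
    }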
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index b69dadcfb897..daf44cd4c566 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -508,6 +508,44 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
508} 508}
509EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid); 509EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
510 510
511int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
512 u32 vport, u64 node_guid)
513{
514 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
515 void *nic_vport_context;
516 u8 *guid;
517 void *in;
518 int err;
519
520 if (!vport)
521 return -EINVAL;
522 if (!MLX5_CAP_GEN(mdev, vport_group_manager))
523 return -EACCES;
524 if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
525 return -ENOTSUPP;
526
527 in = mlx5_vzalloc(inlen);
528 if (!in)
529 return -ENOMEM;
530
531 MLX5_SET(modify_nic_vport_context_in, in,
532 field_select.node_guid, 1);
533 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
534 MLX5_SET(modify_nic_vport_context_in, in, other_vport, !!vport);
535
536 nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
537 in, nic_vport_context);
538 guid = MLX5_ADDR_OF(nic_vport_context, nic_vport_context,
539 node_guid);
540 MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
541
542 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
543
544 kvfree(in);
545
546 return err;
547}
548
511int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev, 549int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
512 u16 *qkey_viol_cntr) 550 u16 *qkey_viol_cntr)
513{ 551{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 4a7273771028..6f9e3ddff4a8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -247,15 +247,23 @@ static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
247 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 247 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
248} 248}
249 249
250static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid) 250static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
251 u8 swid)
251{ 252{
252 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
253 char pspa_pl[MLXSW_REG_PSPA_LEN]; 253 char pspa_pl[MLXSW_REG_PSPA_LEN];
254 254
255 mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port); 255 mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
256 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl); 256 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
257} 257}
258 258
259static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
260{
261 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
262
263 return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
264 swid);
265}
266
259static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, 267static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
260 bool enable) 268 bool enable)
261{ 269{
@@ -305,9 +313,9 @@ mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
305 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); 313 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
306} 314}
307 315
308static int __mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, 316static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
309 u8 local_port, u8 *p_module, 317 u8 local_port, u8 *p_module,
310 u8 *p_width, u8 *p_lane) 318 u8 *p_width, u8 *p_lane)
311{ 319{
312 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 320 char pmlp_pl[MLXSW_REG_PMLP_LEN];
313 int err; 321 int err;
@@ -322,16 +330,6 @@ static int __mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
322 return 0; 330 return 0;
323} 331}
324 332
325static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
326 u8 local_port, u8 *p_module,
327 u8 *p_width)
328{
329 u8 lane;
330
331 return __mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, p_module,
332 p_width, &lane);
333}
334
335static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port, 333static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
336 u8 module, u8 width, u8 lane) 334 u8 module, u8 width, u8 lane)
337{ 335{
@@ -949,17 +947,11 @@ static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
949 size_t len) 947 size_t len)
950{ 948{
951 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 949 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
952 u8 module, width, lane; 950 u8 module = mlxsw_sp_port->mapping.module;
951 u8 width = mlxsw_sp_port->mapping.width;
952 u8 lane = mlxsw_sp_port->mapping.lane;
953 int err; 953 int err;
954 954
955 err = __mlxsw_sp_port_module_info_get(mlxsw_sp_port->mlxsw_sp,
956 mlxsw_sp_port->local_port,
957 &module, &width, &lane);
958 if (err) {
959 netdev_err(dev, "Failed to retrieve module information\n");
960 return err;
961 }
962
963 if (!mlxsw_sp_port->split) 955 if (!mlxsw_sp_port->split)
964 err = snprintf(name, len, "p%d", module + 1); 956 err = snprintf(name, len, "p%d", module + 1);
965 else 957 else
@@ -1681,8 +1673,8 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
1681 return 0; 1673 return 0;
1682} 1674}
1683 1675
1684static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, 1676static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1685 bool split, u8 module, u8 width) 1677 bool split, u8 module, u8 width, u8 lane)
1686{ 1678{
1687 struct mlxsw_sp_port *mlxsw_sp_port; 1679 struct mlxsw_sp_port *mlxsw_sp_port;
1688 struct net_device *dev; 1680 struct net_device *dev;
@@ -1697,6 +1689,9 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1697 mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 1689 mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
1698 mlxsw_sp_port->local_port = local_port; 1690 mlxsw_sp_port->local_port = local_port;
1699 mlxsw_sp_port->split = split; 1691 mlxsw_sp_port->split = split;
1692 mlxsw_sp_port->mapping.module = module;
1693 mlxsw_sp_port->mapping.width = width;
1694 mlxsw_sp_port->mapping.lane = lane;
1700 bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE); 1695 bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
1701 mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL); 1696 mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
1702 if (!mlxsw_sp_port->active_vlans) { 1697 if (!mlxsw_sp_port->active_vlans) {
@@ -1839,28 +1834,6 @@ err_port_active_vlans_alloc:
1839 return err; 1834 return err;
1840} 1835}
1841 1836
1842static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1843 bool split, u8 module, u8 width, u8 lane)
1844{
1845 int err;
1846
1847 err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
1848 lane);
1849 if (err)
1850 return err;
1851
1852 err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split, module,
1853 width);
1854 if (err)
1855 goto err_port_create;
1856
1857 return 0;
1858
1859err_port_create:
1860 mlxsw_sp_port_module_unmap(mlxsw_sp, local_port);
1861 return err;
1862}
1863
1864static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port) 1837static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
1865{ 1838{
1866 struct net_device *dev = mlxsw_sp_port->dev; 1839 struct net_device *dev = mlxsw_sp_port->dev;
@@ -1909,8 +1882,8 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
1909 1882
1910static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) 1883static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
1911{ 1884{
1885 u8 module, width, lane;
1912 size_t alloc_size; 1886 size_t alloc_size;
1913 u8 module, width;
1914 int i; 1887 int i;
1915 int err; 1888 int err;
1916 1889
@@ -1921,13 +1894,14 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
1921 1894
1922 for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) { 1895 for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
1923 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module, 1896 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
1924 &width); 1897 &width, &lane);
1925 if (err) 1898 if (err)
1926 goto err_port_module_info_get; 1899 goto err_port_module_info_get;
1927 if (!width) 1900 if (!width)
1928 continue; 1901 continue;
1929 mlxsw_sp->port_to_module[i] = module; 1902 mlxsw_sp->port_to_module[i] = module;
1930 err = __mlxsw_sp_port_create(mlxsw_sp, i, false, module, width); 1903 err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width,
1904 lane);
1931 if (err) 1905 if (err)
1932 goto err_port_create; 1906 goto err_port_create;
1933 } 1907 }
@@ -1948,12 +1922,85 @@ static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
1948 return local_port - offset; 1922 return local_port - offset;
1949} 1923}
1950 1924
1925static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
1926 u8 module, unsigned int count)
1927{
1928 u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
1929 int err, i;
1930
1931 for (i = 0; i < count; i++) {
1932 err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
1933 width, i * width);
1934 if (err)
1935 goto err_port_module_map;
1936 }
1937
1938 for (i = 0; i < count; i++) {
1939 err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
1940 if (err)
1941 goto err_port_swid_set;
1942 }
1943
1944 for (i = 0; i < count; i++) {
1945 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
1946 module, width, i * width);
1947 if (err)
1948 goto err_port_create;
1949 }
1950
1951 return 0;
1952
1953err_port_create:
1954 for (i--; i >= 0; i--)
1955 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
1956 i = count;
1957err_port_swid_set:
1958 for (i--; i >= 0; i--)
1959 __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
1960 MLXSW_PORT_SWID_DISABLED_PORT);
1961 i = count;
1962err_port_module_map:
1963 for (i--; i >= 0; i--)
1964 mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
1965 return err;
1966}
1967
1968static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
1969 u8 base_port, unsigned int count)
1970{
1971 u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
1972 int i;
1973
1974 /* Split by four means we need to re-create two ports, otherwise
1975 * only one.
1976 */
1977 count = count / 2;
1978
1979 for (i = 0; i < count; i++) {
1980 local_port = base_port + i * 2;
1981 module = mlxsw_sp->port_to_module[local_port];
1982
1983 mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
1984 0);
1985 }
1986
1987 for (i = 0; i < count; i++)
1988 __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);
1989
1990 for (i = 0; i < count; i++) {
1991 local_port = base_port + i * 2;
1992 module = mlxsw_sp->port_to_module[local_port];
1993
1994 mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
1995 width, 0);
1996 }
1997}
1998
1951static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, 1999static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
1952 unsigned int count) 2000 unsigned int count)
1953{ 2001{
1954 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2002 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
1955 struct mlxsw_sp_port *mlxsw_sp_port; 2003 struct mlxsw_sp_port *mlxsw_sp_port;
1956 u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
1957 u8 module, cur_width, base_port; 2004 u8 module, cur_width, base_port;
1958 int i; 2005 int i;
1959 int err; 2006 int err;
@@ -1965,18 +2012,14 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
1965 return -EINVAL; 2012 return -EINVAL;
1966 } 2013 }
1967 2014
2015 module = mlxsw_sp_port->mapping.module;
2016 cur_width = mlxsw_sp_port->mapping.width;
2017
1968 if (count != 2 && count != 4) { 2018 if (count != 2 && count != 4) {
1969 netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n"); 2019 netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
1970 return -EINVAL; 2020 return -EINVAL;
1971 } 2021 }
1972 2022
1973 err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
1974 &cur_width);
1975 if (err) {
1976 netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
1977 return err;
1978 }
1979
1980 if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) { 2023 if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
1981 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n"); 2024 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
1982 return -EINVAL; 2025 return -EINVAL;
@@ -2001,25 +2044,16 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
2001 for (i = 0; i < count; i++) 2044 for (i = 0; i < count; i++)
2002 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 2045 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
2003 2046
2004 for (i = 0; i < count; i++) { 2047 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
2005 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true, 2048 if (err) {
2006 module, width, i * width); 2049 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
2007 if (err) { 2050 goto err_port_split_create;
2008 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split port\n");
2009 goto err_port_create;
2010 }
2011 } 2051 }
2012 2052
2013 return 0; 2053 return 0;
2014 2054
2015err_port_create: 2055err_port_split_create:
2016 for (i--; i >= 0; i--) 2056 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
2017 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
2018 for (i = 0; i < count / 2; i++) {
2019 module = mlxsw_sp->port_to_module[base_port + i * 2];
2020 mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
2021 module, MLXSW_PORT_MODULE_MAX_WIDTH, 0);
2022 }
2023 return err; 2057 return err;
2024} 2058}
2025 2059
@@ -2027,10 +2061,9 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
2027{ 2061{
2028 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2062 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2029 struct mlxsw_sp_port *mlxsw_sp_port; 2063 struct mlxsw_sp_port *mlxsw_sp_port;
2030 u8 module, cur_width, base_port; 2064 u8 cur_width, base_port;
2031 unsigned int count; 2065 unsigned int count;
2032 int i; 2066 int i;
2033 int err;
2034 2067
2035 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2068 mlxsw_sp_port = mlxsw_sp->ports[local_port];
2036 if (!mlxsw_sp_port) { 2069 if (!mlxsw_sp_port) {
@@ -2044,12 +2077,7 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
2044 return -EINVAL; 2077 return -EINVAL;
2045 } 2078 }
2046 2079
2047 err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module, 2080 cur_width = mlxsw_sp_port->mapping.width;
2048 &cur_width);
2049 if (err) {
2050 netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
2051 return err;
2052 }
2053 count = cur_width == 1 ? 4 : 2; 2081 count = cur_width == 1 ? 4 : 2;
2054 2082
2055 base_port = mlxsw_sp_cluster_base_port_get(local_port); 2083 base_port = mlxsw_sp_cluster_base_port_get(local_port);
@@ -2061,14 +2089,7 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
2061 for (i = 0; i < count; i++) 2089 for (i = 0; i < count; i++)
2062 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 2090 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
2063 2091
2064 for (i = 0; i < count / 2; i++) { 2092 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
2065 module = mlxsw_sp->port_to_module[base_port + i * 2];
2066 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
2067 module, MLXSW_PORT_MODULE_MAX_WIDTH,
2068 0);
2069 if (err)
2070 dev_err(mlxsw_sp->bus_info->dev, "Failed to reinstantiate port\n");
2071 }
2072 2093
2073 return 0; 2094 return 0;
2074} 2095}
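
The mlxsw unwind above re-primes i = count before falling into each earlier label, so every rollback loop undoes its whole stage no matter where the failure happened. A minimal userspace sketch of that goto-unwind idiom; stage_a()/stage_b() and the undo helpers are hypothetical stand-ins for the module-map/swid/create calls:

/* Illustrative model of the staged goto-unwind used above. Resetting i
 * to count before an earlier label makes that stage's rollback loop
 * undo *all* of its items, not just the ones done before the failure.
 */
#include <stdio.h>

static int stage_a(int i) { return 0; }               /* e.g. swid set */
static int stage_b(int i) { return i == 2 ? -1 : 0; } /* e.g. port create */
static void undo_a(int i) { printf("undo a %d\n", i); }
static void undo_b(int i) { printf("undo b %d\n", i); }

static int create_all(int count)
{
	int i, err;

	for (i = 0; i < count; i++) {
		err = stage_a(i);
		if (err)
			goto err_stage_a;
	}
	for (i = 0; i < count; i++) {
		err = stage_b(i);
		if (err)
			goto err_stage_b;
	}
	return 0;

err_stage_b:
	for (i--; i >= 0; i--)
		undo_b(i);
	i = count;	/* stage A fully succeeded: roll back all of it */
err_stage_a:
	for (i--; i >= 0; i--)
		undo_a(i);
	return err;
}

int main(void) { return create_all(4) ? 1 : 0; }

One label per stage, in reverse order of setup, lets a failure at any point fall through exactly the teardowns it needs.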
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index e2c022d3e2f3..13b30eaa13d4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -229,6 +229,11 @@ struct mlxsw_sp_port {
229 struct ieee_maxrate *maxrate; 229 struct ieee_maxrate *maxrate;
230 struct ieee_pfc *pfc; 230 struct ieee_pfc *pfc;
231 } dcb; 231 } dcb;
232 struct {
233 u8 module;
234 u8 width;
235 u8 lane;
236 } mapping;
232 /* 802.1Q bridge VLANs */ 237 /* 802.1Q bridge VLANs */
233 unsigned long *active_vlans; 238 unsigned long *active_vlans;
234 unsigned long *untagged_vlans; 239 unsigned long *untagged_vlans;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index cbf58e1f9333..21ec1c2df2c7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -192,9 +192,10 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
192 struct dcbx_app_priority_entry *p_tbl, 192 struct dcbx_app_priority_entry *p_tbl,
193 u32 pri_tc_tbl, int count, bool dcbx_enabled) 193 u32 pri_tc_tbl, int count, bool dcbx_enabled)
194{ 194{
195 u8 tc, priority, priority_map; 195 u8 tc, priority_map;
196 enum dcbx_protocol_type type; 196 enum dcbx_protocol_type type;
197 u16 protocol_id; 197 u16 protocol_id;
198 int priority;
198 bool enable; 199 bool enable;
199 int i; 200 int i;
200 201
@@ -221,7 +222,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
221 * indication, but we only got here if there was an 222 * indication, but we only got here if there was an
222 * app tlv for the protocol, so dcbx must be enabled. 223 * app tlv for the protocol, so dcbx must be enabled.
223 */ 224 */
224 enable = !!(type == DCBX_PROTOCOL_ETH); 225 enable = !(type == DCBX_PROTOCOL_ETH);
225 226
226 qed_dcbx_update_app_info(p_data, p_hwfn, enable, true, 227 qed_dcbx_update_app_info(p_data, p_hwfn, enable, true,
227 priority, tc, type); 228 priority, tc, type);
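
Since type == DCBX_PROTOCOL_ETH already evaluates to 0 or 1, the "!!" in the removed line was a no-op; swapping it for "!" is a behavioural change that inverts the flag, enabling every protocol except ETH. A tiny standalone check of the two forms (plain ints stand in for the enum):

#include <assert.h>

int main(void)
{
	int is_eth = 1, is_other = 0;

	assert(!!(is_eth == 1) == 1);  /* old: enabled only for ETH  */
	assert(!(is_eth == 1) == 0);   /* new: disabled for ETH ...  */
	assert(!(is_other == 1) == 1); /* ... enabled for the rest   */
	return 0;
}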
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 089016f46f26..2d89e8c16b32 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -155,12 +155,14 @@ void qed_resc_free(struct qed_dev *cdev)
155 } 155 }
156} 156}
157 157
158static int qed_init_qm_info(struct qed_hwfn *p_hwfn) 158static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
159{ 159{
160 u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0; 160 u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0;
161 struct qed_qm_info *qm_info = &p_hwfn->qm_info; 161 struct qed_qm_info *qm_info = &p_hwfn->qm_info;
162 struct init_qm_port_params *p_qm_port; 162 struct init_qm_port_params *p_qm_port;
163 u16 num_pqs, multi_cos_tcs = 1; 163 u16 num_pqs, multi_cos_tcs = 1;
164 u8 pf_wfq = qm_info->pf_wfq;
165 u32 pf_rl = qm_info->pf_rl;
164 u16 num_vfs = 0; 166 u16 num_vfs = 0;
165 167
166#ifdef CONFIG_QED_SRIOV 168#ifdef CONFIG_QED_SRIOV
@@ -182,23 +184,28 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
182 184
 183 /* PQs will be arranged as follows: First per-TC PQ then pure-LB queue. 185 /* PQs will be arranged as follows: First per-TC PQ then pure-LB queue.
184 */ 186 */
185 qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) * 187 qm_info->qm_pq_params = kcalloc(num_pqs,
186 num_pqs, GFP_KERNEL); 188 sizeof(struct init_qm_pq_params),
189 b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
187 if (!qm_info->qm_pq_params) 190 if (!qm_info->qm_pq_params)
188 goto alloc_err; 191 goto alloc_err;
189 192
190 qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) * 193 qm_info->qm_vport_params = kcalloc(num_vports,
191 num_vports, GFP_KERNEL); 194 sizeof(struct init_qm_vport_params),
195 b_sleepable ? GFP_KERNEL
196 : GFP_ATOMIC);
192 if (!qm_info->qm_vport_params) 197 if (!qm_info->qm_vport_params)
193 goto alloc_err; 198 goto alloc_err;
194 199
195 qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) * 200 qm_info->qm_port_params = kcalloc(MAX_NUM_PORTS,
196 MAX_NUM_PORTS, GFP_KERNEL); 201 sizeof(struct init_qm_port_params),
202 b_sleepable ? GFP_KERNEL
203 : GFP_ATOMIC);
197 if (!qm_info->qm_port_params) 204 if (!qm_info->qm_port_params)
198 goto alloc_err; 205 goto alloc_err;
199 206
200 qm_info->wfq_data = kcalloc(num_vports, sizeof(*qm_info->wfq_data), 207 qm_info->wfq_data = kcalloc(num_vports, sizeof(struct qed_wfq_data),
201 GFP_KERNEL); 208 b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
202 if (!qm_info->wfq_data) 209 if (!qm_info->wfq_data)
203 goto alloc_err; 210 goto alloc_err;
204 211
@@ -264,10 +271,10 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
264 for (i = 0; i < qm_info->num_vports; i++) 271 for (i = 0; i < qm_info->num_vports; i++)
265 qm_info->qm_vport_params[i].vport_wfq = 1; 272 qm_info->qm_vport_params[i].vport_wfq = 1;
266 273
267 qm_info->pf_wfq = 0;
268 qm_info->pf_rl = 0;
269 qm_info->vport_rl_en = 1; 274 qm_info->vport_rl_en = 1;
270 qm_info->vport_wfq_en = 1; 275 qm_info->vport_wfq_en = 1;
276 qm_info->pf_rl = pf_rl;
277 qm_info->pf_wfq = pf_wfq;
271 278
272 return 0; 279 return 0;
273 280
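
Two things happen in this hunk: the allocations learn a b_sleepable switch (GFP_KERNEL when the caller may sleep, GFP_ATOMIC on the reconfiguration path that may not), and kzalloc(n * size) becomes kcalloc(n, size), which rejects multiplications that would overflow. A userspace model of the overflow guard kcalloc provides; zalloc_array() is a hypothetical helper:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void *zalloc_array(size_t n, size_t size)
{
	if (size && n > SIZE_MAX / size)	/* n * size would overflow */
		return NULL;
	return calloc(n, size);			/* zeroed, like kcalloc */
}

int main(void)
{
	/* Fine: small array, zero-initialized. */
	uint32_t *ok = zalloc_array(16, sizeof(*ok));
	/* Rejected: the element count times 16 bytes overflows size_t. */
	void *bad = zalloc_array(SIZE_MAX / 8, 16);

	printf("ok=%p bad=%p\n", (void *)ok, bad);
	free(ok);
	return 0;
}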
@@ -299,7 +306,7 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
299 qed_qm_info_free(p_hwfn); 306 qed_qm_info_free(p_hwfn);
300 307
301 /* initialize qed's qm data structure */ 308 /* initialize qed's qm data structure */
302 rc = qed_init_qm_info(p_hwfn); 309 rc = qed_init_qm_info(p_hwfn, false);
303 if (rc) 310 if (rc)
304 return rc; 311 return rc;
305 312
@@ -388,7 +395,7 @@ int qed_resc_alloc(struct qed_dev *cdev)
388 goto alloc_err; 395 goto alloc_err;
389 396
390 /* Prepare and process QM requirements */ 397 /* Prepare and process QM requirements */
391 rc = qed_init_qm_info(p_hwfn); 398 rc = qed_init_qm_info(p_hwfn, true);
392 if (rc) 399 if (rc)
393 goto alloc_err; 400 goto alloc_err;
394 401
@@ -581,7 +588,14 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
581 588
582 hw_mode |= 1 << MODE_ASIC; 589 hw_mode |= 1 << MODE_ASIC;
583 590
591 if (p_hwfn->cdev->num_hwfns > 1)
592 hw_mode |= 1 << MODE_100G;
593
584 p_hwfn->hw_info.hw_mode = hw_mode; 594 p_hwfn->hw_info.hw_mode = hw_mode;
595
596 DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP),
597 "Configuring function for hw_mode: 0x%08x\n",
598 p_hwfn->hw_info.hw_mode);
585} 599}
586 600
587/* Init run time data for all PFs on an engine. */ 601/* Init run time data for all PFs on an engine. */
@@ -821,6 +835,11 @@ int qed_hw_init(struct qed_dev *cdev,
821 u32 load_code, param; 835 u32 load_code, param;
822 int rc, mfw_rc, i; 836 int rc, mfw_rc, i;
823 837
838 if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
839 DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
840 return -EINVAL;
841 }
842
824 if (IS_PF(cdev)) { 843 if (IS_PF(cdev)) {
825 rc = qed_init_fw_data(cdev, bin_fw_data); 844 rc = qed_init_fw_data(cdev, bin_fw_data);
826 if (rc != 0) 845 if (rc != 0)
@@ -2086,6 +2105,13 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
2086{ 2105{
2087 int i; 2106 int i;
2088 2107
2108 if (cdev->num_hwfns > 1) {
2109 DP_VERBOSE(cdev,
2110 NETIF_MSG_LINK,
2111 "WFQ configuration is not supported for this device\n");
2112 return;
2113 }
2114
2089 for_each_hwfn(cdev, i) { 2115 for_each_hwfn(cdev, i) {
2090 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 2116 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2091 2117
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 8b22f87033ce..61cc6869fa65 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -413,15 +413,17 @@ static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
413 /* Fallthrough */ 413 /* Fallthrough */
414 414
415 case QED_INT_MODE_MSI: 415 case QED_INT_MODE_MSI:
416 rc = pci_enable_msi(cdev->pdev); 416 if (cdev->num_hwfns == 1) {
417 if (!rc) { 417 rc = pci_enable_msi(cdev->pdev);
418 int_params->out.int_mode = QED_INT_MODE_MSI; 418 if (!rc) {
419 goto out; 419 int_params->out.int_mode = QED_INT_MODE_MSI;
420 } 420 goto out;
421 }
421 422
422 DP_NOTICE(cdev, "Failed to enable MSI\n"); 423 DP_NOTICE(cdev, "Failed to enable MSI\n");
423 if (force_mode) 424 if (force_mode)
424 goto out; 425 goto out;
426 }
425 /* Fallthrough */ 427 /* Fallthrough */
426 428
427 case QED_INT_MODE_INTA: 429 case QED_INT_MODE_INTA:
@@ -1103,6 +1105,39 @@ static int qed_get_port_type(u32 media_type)
1103 return port_type; 1105 return port_type;
1104} 1106}
1105 1107
1108static int qed_get_link_data(struct qed_hwfn *hwfn,
1109 struct qed_mcp_link_params *params,
1110 struct qed_mcp_link_state *link,
1111 struct qed_mcp_link_capabilities *link_caps)
1112{
1113 void *p;
1114
1115 if (!IS_PF(hwfn->cdev)) {
1116 qed_vf_get_link_params(hwfn, params);
1117 qed_vf_get_link_state(hwfn, link);
1118 qed_vf_get_link_caps(hwfn, link_caps);
1119
1120 return 0;
1121 }
1122
1123 p = qed_mcp_get_link_params(hwfn);
1124 if (!p)
1125 return -ENXIO;
1126 memcpy(params, p, sizeof(*params));
1127
1128 p = qed_mcp_get_link_state(hwfn);
1129 if (!p)
1130 return -ENXIO;
1131 memcpy(link, p, sizeof(*link));
1132
1133 p = qed_mcp_get_link_capabilities(hwfn);
1134 if (!p)
1135 return -ENXIO;
1136 memcpy(link_caps, p, sizeof(*link_caps));
1137
1138 return 0;
1139}
1140
1106static void qed_fill_link(struct qed_hwfn *hwfn, 1141static void qed_fill_link(struct qed_hwfn *hwfn,
1107 struct qed_link_output *if_link) 1142 struct qed_link_output *if_link)
1108{ 1143{
@@ -1114,15 +1149,9 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
1114 memset(if_link, 0, sizeof(*if_link)); 1149 memset(if_link, 0, sizeof(*if_link));
1115 1150
1116 /* Prepare source inputs */ 1151 /* Prepare source inputs */
1117 if (IS_PF(hwfn->cdev)) { 1152 if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
1118 memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params)); 1153 dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
1119 memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link)); 1154 return;
1120 memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn),
1121 sizeof(link_caps));
1122 } else {
1123 qed_vf_get_link_params(hwfn, &params);
1124 qed_vf_get_link_state(hwfn, &link);
1125 qed_vf_get_link_caps(hwfn, &link_caps);
1126 } 1155 }
1127 1156
1128 /* Set the link parameters to pass to protocol driver */ 1157 /* Set the link parameters to pass to protocol driver */
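
The old qed_fill_link() memcpy'd straight from qed_mcp_get_link_params() and friends, which can return NULL before the MCP data is ready; qed_get_link_data() turns that into a checked copy that propagates -ENXIO. A small model of the pattern, with a hypothetical get_params() getter:

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct link_params { int speed; };

static struct link_params *get_params(int ready)
{
	static struct link_params p = { .speed = 40000 };
	return ready ? &p : NULL;	/* NULL e.g. before MCP init */
}

static int fill_link(struct link_params *out, int ready)
{
	struct link_params *p = get_params(ready);

	if (!p)
		return -ENXIO;		/* caller logs and bails, no crash */
	memcpy(out, p, sizeof(*out));
	return 0;
}

int main(void)
{
	struct link_params lp;

	if (fill_link(&lp, 0))
		printf("no link data available\n");
	if (!fill_link(&lp, 1))
		printf("speed %d\n", lp.speed);
	return 0;
}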
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index c8667c65e685..c90b2b6ad969 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -12,11 +12,13 @@
12#include "qed_vf.h" 12#include "qed_vf.h"
13#define QED_VF_ARRAY_LENGTH (3) 13#define QED_VF_ARRAY_LENGTH (3)
14 14
15#ifdef CONFIG_QED_SRIOV
15#define IS_VF(cdev) ((cdev)->b_is_vf) 16#define IS_VF(cdev) ((cdev)->b_is_vf)
16#define IS_PF(cdev) (!((cdev)->b_is_vf)) 17#define IS_PF(cdev) (!((cdev)->b_is_vf))
17#ifdef CONFIG_QED_SRIOV
18#define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->cdev->p_iov_info)) 18#define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->cdev->p_iov_info))
19#else 19#else
20#define IS_VF(cdev) (0)
21#define IS_PF(cdev) (1)
20#define IS_PF_SRIOV(p_hwfn) (0) 22#define IS_PF_SRIOV(p_hwfn) (0)
21#endif 23#endif
22#define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info)) 24#define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info))
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 1bc75358cbc4..ad3cae3b7243 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -230,7 +230,10 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
230 case ETH_SS_PRIV_FLAGS: 230 case ETH_SS_PRIV_FLAGS:
231 return QEDE_PRI_FLAG_LEN; 231 return QEDE_PRI_FLAG_LEN;
232 case ETH_SS_TEST: 232 case ETH_SS_TEST:
233 return QEDE_ETHTOOL_TEST_MAX; 233 if (!IS_VF(edev))
234 return QEDE_ETHTOOL_TEST_MAX;
235 else
236 return 0;
234 default: 237 default:
235 DP_VERBOSE(edev, QED_MSG_DEBUG, 238 DP_VERBOSE(edev, QED_MSG_DEBUG,
236 "Unsupported stringset 0x%08x\n", stringset); 239 "Unsupported stringset 0x%08x\n", stringset);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 337e839ca586..5733d1888223 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -87,7 +87,9 @@ static const struct pci_device_id qede_pci_tbl[] = {
87 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF}, 87 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
88 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF}, 88 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
89 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF}, 89 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
90#ifdef CONFIG_QED_SRIOV
90 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF}, 91 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
92#endif
91 { 0 } 93 { 0 }
92}; 94};
93 95
@@ -1824,7 +1826,7 @@ static int qede_set_vf_rate(struct net_device *dev, int vfidx,
1824{ 1826{
1825 struct qede_dev *edev = netdev_priv(dev); 1827 struct qede_dev *edev = netdev_priv(dev);
1826 1828
1827 return edev->ops->iov->set_rate(edev->cdev, vfidx, max_tx_rate, 1829 return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
1828 max_tx_rate); 1830 max_tx_rate);
1829} 1831}
1830 1832
@@ -2091,6 +2093,29 @@ static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
2091 edev->accept_any_vlan = false; 2093 edev->accept_any_vlan = false;
2092} 2094}
2093 2095
2096int qede_set_features(struct net_device *dev, netdev_features_t features)
2097{
2098 struct qede_dev *edev = netdev_priv(dev);
2099 netdev_features_t changes = features ^ dev->features;
2100 bool need_reload = false;
2101
2102 /* No action needed if hardware GRO is disabled during driver load */
2103 if (changes & NETIF_F_GRO) {
2104 if (dev->features & NETIF_F_GRO)
2105 need_reload = !edev->gro_disable;
2106 else
2107 need_reload = edev->gro_disable;
2108 }
2109
2110 if (need_reload && netif_running(edev->ndev)) {
2111 dev->features = features;
2112 qede_reload(edev, NULL, NULL);
2113 return 1;
2114 }
2115
2116 return 0;
2117}
2118
2094#ifdef CONFIG_QEDE_VXLAN 2119#ifdef CONFIG_QEDE_VXLAN
2095static void qede_add_vxlan_port(struct net_device *dev, 2120static void qede_add_vxlan_port(struct net_device *dev,
2096 sa_family_t sa_family, __be16 port) 2121 sa_family_t sa_family, __be16 port)
@@ -2175,6 +2200,7 @@ static const struct net_device_ops qede_netdev_ops = {
2175#endif 2200#endif
2176 .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, 2201 .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
2177 .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, 2202 .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
2203 .ndo_set_features = qede_set_features,
2178 .ndo_get_stats64 = qede_get_stats64, 2204 .ndo_get_stats64 = qede_get_stats64,
2179#ifdef CONFIG_QED_SRIOV 2205#ifdef CONFIG_QED_SRIOV
2180 .ndo_set_vf_link_state = qede_set_vf_link_state, 2206 .ndo_set_vf_link_state = qede_set_vf_link_state,
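
qede_set_features() above uses features ^ dev->features to isolate the bits that actually changed, then reloads only when the GRO bit flipped relative to the current hardware state. A self-contained model of that decision; the flag values are arbitrary:

#include <stdio.h>

#define F_GRO  (1u << 0)
#define F_RXCS (1u << 1)

static int need_reload(unsigned int old, unsigned int new, int gro_disabled)
{
	unsigned int changes = old ^ new;	/* bits that differ */

	if (!(changes & F_GRO))
		return 0;
	/* Enabling GRO matters only if hardware GRO is currently off,
	 * and disabling it only if it is currently on.
	 */
	return (old & F_GRO) ? !gro_disabled : gro_disabled;
}

int main(void)
{
	printf("%d\n", need_reload(F_RXCS, F_RXCS | F_GRO, 1)); /* 1: turn on */
	printf("%d\n", need_reload(F_RXCS, F_RXCS, 1));         /* 0: no change */
	return 0;
}

Testing the XOR first keeps unrelated feature toggles from forcing a reload.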
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 83d72106471c..fd5d1c93b55b 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -4846,7 +4846,6 @@ static void ql_eeh_close(struct net_device *ndev)
4846 } 4846 }
4847 4847
4848 /* Disabling the timer */ 4848 /* Disabling the timer */
4849 del_timer_sync(&qdev->timer);
4850 ql_cancel_all_work_sync(qdev); 4849 ql_cancel_all_work_sync(qdev);
4851 4850
4852 for (i = 0; i < qdev->rss_ring_count; i++) 4851 for (i = 0; i < qdev->rss_ring_count; i++)
@@ -4873,6 +4872,7 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4873 return PCI_ERS_RESULT_CAN_RECOVER; 4872 return PCI_ERS_RESULT_CAN_RECOVER;
4874 case pci_channel_io_frozen: 4873 case pci_channel_io_frozen:
4875 netif_device_detach(ndev); 4874 netif_device_detach(ndev);
4875 del_timer_sync(&qdev->timer);
4876 if (netif_running(ndev)) 4876 if (netif_running(ndev))
4877 ql_eeh_close(ndev); 4877 ql_eeh_close(ndev);
4878 pci_disable_device(pdev); 4878 pci_disable_device(pdev);
@@ -4880,6 +4880,7 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4880 case pci_channel_io_perm_failure: 4880 case pci_channel_io_perm_failure:
4881 dev_err(&pdev->dev, 4881 dev_err(&pdev->dev,
4882 "%s: pci_channel_io_perm_failure.\n", __func__); 4882 "%s: pci_channel_io_perm_failure.\n", __func__);
4883 del_timer_sync(&qdev->timer);
4883 ql_eeh_close(ndev); 4884 ql_eeh_close(ndev);
4884 set_bit(QL_EEH_FATAL, &qdev->flags); 4885 set_bit(QL_EEH_FATAL, &qdev->flags);
4885 return PCI_ERS_RESULT_DISCONNECT; 4886 return PCI_ERS_RESULT_DISCONNECT;
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 1681084cc96f..1f309127457d 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -619,6 +619,17 @@ fail:
619 return rc; 619 return rc;
620} 620}
621 621
622static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
623{
624 struct efx_channel *channel;
625 struct efx_tx_queue *tx_queue;
626
627 /* All our existing PIO buffers went away */
628 efx_for_each_channel(channel, efx)
629 efx_for_each_channel_tx_queue(tx_queue, channel)
630 tx_queue->piobuf = NULL;
631}
632
622#else /* !EFX_USE_PIO */ 633#else /* !EFX_USE_PIO */
623 634
624static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) 635static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
@@ -635,6 +646,10 @@ static void efx_ef10_free_piobufs(struct efx_nic *efx)
635{ 646{
636} 647}
637 648
649static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
650{
651}
652
638#endif /* EFX_USE_PIO */ 653#endif /* EFX_USE_PIO */
639 654
640static void efx_ef10_remove(struct efx_nic *efx) 655static void efx_ef10_remove(struct efx_nic *efx)
@@ -1018,6 +1033,7 @@ static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
1018 nic_data->must_realloc_vis = true; 1033 nic_data->must_realloc_vis = true;
1019 nic_data->must_restore_filters = true; 1034 nic_data->must_restore_filters = true;
1020 nic_data->must_restore_piobufs = true; 1035 nic_data->must_restore_piobufs = true;
1036 efx_ef10_forget_old_piobufs(efx);
1021 nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; 1037 nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
1022 1038
1023 /* Driver-created vswitches and vports must be re-created */ 1039 /* Driver-created vswitches and vports must be re-created */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 0705ec869487..097f363f1630 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1726,14 +1726,33 @@ static int efx_probe_filters(struct efx_nic *efx)
1726 1726
1727#ifdef CONFIG_RFS_ACCEL 1727#ifdef CONFIG_RFS_ACCEL
1728 if (efx->type->offload_features & NETIF_F_NTUPLE) { 1728 if (efx->type->offload_features & NETIF_F_NTUPLE) {
1729 efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters, 1729 struct efx_channel *channel;
1730 sizeof(*efx->rps_flow_id), 1730 int i, success = 1;
1731 GFP_KERNEL); 1731
1732 if (!efx->rps_flow_id) { 1732 efx_for_each_channel(channel, efx) {
1733 channel->rps_flow_id =
1734 kcalloc(efx->type->max_rx_ip_filters,
1735 sizeof(*channel->rps_flow_id),
1736 GFP_KERNEL);
1737 if (!channel->rps_flow_id)
1738 success = 0;
1739 else
1740 for (i = 0;
1741 i < efx->type->max_rx_ip_filters;
1742 ++i)
1743 channel->rps_flow_id[i] =
1744 RPS_FLOW_ID_INVALID;
1745 }
1746
1747 if (!success) {
1748 efx_for_each_channel(channel, efx)
1749 kfree(channel->rps_flow_id);
1733 efx->type->filter_table_remove(efx); 1750 efx->type->filter_table_remove(efx);
1734 rc = -ENOMEM; 1751 rc = -ENOMEM;
1735 goto out_unlock; 1752 goto out_unlock;
1736 } 1753 }
1754
1755 efx->rps_expire_index = efx->rps_expire_channel = 0;
1737 } 1756 }
1738#endif 1757#endif
1739out_unlock: 1758out_unlock:
@@ -1744,7 +1763,10 @@ out_unlock:
1744static void efx_remove_filters(struct efx_nic *efx) 1763static void efx_remove_filters(struct efx_nic *efx)
1745{ 1764{
1746#ifdef CONFIG_RFS_ACCEL 1765#ifdef CONFIG_RFS_ACCEL
1747 kfree(efx->rps_flow_id); 1766 struct efx_channel *channel;
1767
1768 efx_for_each_channel(channel, efx)
1769 kfree(channel->rps_flow_id);
1748#endif 1770#endif
1749 down_write(&efx->filter_sem); 1771 down_write(&efx->filter_sem);
1750 efx->type->filter_table_remove(efx); 1772 efx->type->filter_table_remove(efx);
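
The sfc hunk moves the RFS flow-ID table from the NIC to one table per channel, initialises every slot to RPS_FLOW_ID_INVALID, and frees everything if any channel's allocation fails. A userspace model of that all-or-nothing setup (free(NULL) is a no-op, mirroring kfree):

#include <stdio.h>
#include <stdlib.h>

#define N_CHANNELS 4
#define TABLE_SIZE 8
#define FLOW_ID_INVALID 0xFFFFFFFFu

int main(void)
{
	unsigned int *flow_id[N_CHANNELS] = { 0 };
	int c, i, success = 1;

	for (c = 0; c < N_CHANNELS; c++) {
		flow_id[c] = calloc(TABLE_SIZE, sizeof(**flow_id));
		if (!flow_id[c]) {
			success = 0;
		} else {
			for (i = 0; i < TABLE_SIZE; i++)
				flow_id[c][i] = FLOW_ID_INVALID; /* no filter yet */
		}
	}

	if (!success) {
		for (c = 0; c < N_CHANNELS; c++)
			free(flow_id[c]);	/* free(NULL) is safe */
		return 1;
	}
	printf("all channels ready\n");
	for (c = 0; c < N_CHANNELS; c++)
		free(flow_id[c]);
	return 0;
}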
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 7f295c4d7b80..2a9228a6e4a0 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -189,11 +189,12 @@ static u32 mcdi_to_ethtool_cap(u32 media, u32 cap)
189 189
190 case MC_CMD_MEDIA_XFP: 190 case MC_CMD_MEDIA_XFP:
191 case MC_CMD_MEDIA_SFP_PLUS: 191 case MC_CMD_MEDIA_SFP_PLUS:
192 result |= SUPPORTED_FIBRE;
193 break;
194
195 case MC_CMD_MEDIA_QSFP_PLUS: 192 case MC_CMD_MEDIA_QSFP_PLUS:
196 result |= SUPPORTED_FIBRE; 193 result |= SUPPORTED_FIBRE;
194 if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
195 result |= SUPPORTED_1000baseT_Full;
196 if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
197 result |= SUPPORTED_10000baseT_Full;
197 if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) 198 if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
198 result |= SUPPORTED_40000baseCR4_Full; 199 result |= SUPPORTED_40000baseCR4_Full;
199 break; 200 break;
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 38c422321cda..d13ddf9703ff 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -403,6 +403,8 @@ enum efx_sync_events_state {
403 * @event_test_cpu: Last CPU to handle interrupt or test event for this channel 403 * @event_test_cpu: Last CPU to handle interrupt or test event for this channel
404 * @irq_count: Number of IRQs since last adaptive moderation decision 404 * @irq_count: Number of IRQs since last adaptive moderation decision
405 * @irq_mod_score: IRQ moderation score 405 * @irq_mod_score: IRQ moderation score
406 * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
407 * indexed by filter ID
406 * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors 408 * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
407 * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors 409 * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
408 * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors 410 * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
@@ -446,6 +448,8 @@ struct efx_channel {
446 unsigned int irq_mod_score; 448 unsigned int irq_mod_score;
447#ifdef CONFIG_RFS_ACCEL 449#ifdef CONFIG_RFS_ACCEL
448 unsigned int rfs_filters_added; 450 unsigned int rfs_filters_added;
451#define RPS_FLOW_ID_INVALID 0xFFFFFFFF
452 u32 *rps_flow_id;
449#endif 453#endif
450 454
451 unsigned n_rx_tobe_disc; 455 unsigned n_rx_tobe_disc;
@@ -889,9 +893,9 @@ struct vfdi_status;
889 * @filter_sem: Filter table rw_semaphore, for freeing the table 893 * @filter_sem: Filter table rw_semaphore, for freeing the table
890 * @filter_lock: Filter table lock, for mere content changes 894 * @filter_lock: Filter table lock, for mere content changes
891 * @filter_state: Architecture-dependent filter table state 895 * @filter_state: Architecture-dependent filter table state
892 * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS, 896 * @rps_expire_channel: Next channel to check for expiry
893 * indexed by filter ID 897 * @rps_expire_index: Next index to check for expiry in
894 * @rps_expire_index: Next index to check for expiry in @rps_flow_id 898 * @rps_expire_channel's @rps_flow_id
895 * @active_queues: Count of RX and TX queues that haven't been flushed and drained. 899 * @active_queues: Count of RX and TX queues that haven't been flushed and drained.
896 * @rxq_flush_pending: Count of number of receive queues that need to be flushed. 900 * @rxq_flush_pending: Count of number of receive queues that need to be flushed.
897 * Decremented when the efx_flush_rx_queue() is called. 901 * Decremented when the efx_flush_rx_queue() is called.
@@ -1035,7 +1039,7 @@ struct efx_nic {
1035 spinlock_t filter_lock; 1039 spinlock_t filter_lock;
1036 void *filter_state; 1040 void *filter_state;
1037#ifdef CONFIG_RFS_ACCEL 1041#ifdef CONFIG_RFS_ACCEL
1038 u32 *rps_flow_id; 1042 unsigned int rps_expire_channel;
1039 unsigned int rps_expire_index; 1043 unsigned int rps_expire_index;
1040#endif 1044#endif
1041 1045
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 8956995b2fe7..02b0b5272c14 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -842,33 +842,18 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
842 struct efx_nic *efx = netdev_priv(net_dev); 842 struct efx_nic *efx = netdev_priv(net_dev);
843 struct efx_channel *channel; 843 struct efx_channel *channel;
844 struct efx_filter_spec spec; 844 struct efx_filter_spec spec;
845 const __be16 *ports; 845 struct flow_keys fk;
846 __be16 ether_type;
847 int nhoff;
848 int rc; 846 int rc;
849 847
850 /* The core RPS/RFS code has already parsed and validated 848 if (flow_id == RPS_FLOW_ID_INVALID)
851 * VLAN, IP and transport headers. We assume they are in the 849 return -EINVAL;
852 * header area.
853 */
854
855 if (skb->protocol == htons(ETH_P_8021Q)) {
856 const struct vlan_hdr *vh =
857 (const struct vlan_hdr *)skb->data;
858 850
859 /* We can't filter on the IP 5-tuple and the vlan 851 if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
860 * together, so just strip the vlan header and filter 852 return -EPROTONOSUPPORT;
861 * on the IP part.
862 */
863 EFX_BUG_ON_PARANOID(skb_headlen(skb) < sizeof(*vh));
864 ether_type = vh->h_vlan_encapsulated_proto;
865 nhoff = sizeof(struct vlan_hdr);
866 } else {
867 ether_type = skb->protocol;
868 nhoff = 0;
869 }
870 853
871 if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6)) 854 if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6))
855 return -EPROTONOSUPPORT;
856 if (fk.control.flags & FLOW_DIS_IS_FRAGMENT)
872 return -EPROTONOSUPPORT; 857 return -EPROTONOSUPPORT;
873 858
874 efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, 859 efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
@@ -878,56 +863,41 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
878 EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | 863 EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
879 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | 864 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
880 EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT; 865 EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
881 spec.ether_type = ether_type; 866 spec.ether_type = fk.basic.n_proto;
882 867 spec.ip_proto = fk.basic.ip_proto;
883 if (ether_type == htons(ETH_P_IP)) { 868
884 const struct iphdr *ip = 869 if (fk.basic.n_proto == htons(ETH_P_IP)) {
885 (const struct iphdr *)(skb->data + nhoff); 870 spec.rem_host[0] = fk.addrs.v4addrs.src;
886 871 spec.loc_host[0] = fk.addrs.v4addrs.dst;
887 EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
888 if (ip_is_fragment(ip))
889 return -EPROTONOSUPPORT;
890 spec.ip_proto = ip->protocol;
891 spec.rem_host[0] = ip->saddr;
892 spec.loc_host[0] = ip->daddr;
893 EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
894 ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
895 } else { 872 } else {
896 const struct ipv6hdr *ip6 = 873 memcpy(spec.rem_host, &fk.addrs.v6addrs.src, sizeof(struct in6_addr));
897 (const struct ipv6hdr *)(skb->data + nhoff); 874 memcpy(spec.loc_host, &fk.addrs.v6addrs.dst, sizeof(struct in6_addr));
898
899 EFX_BUG_ON_PARANOID(skb_headlen(skb) <
900 nhoff + sizeof(*ip6) + 4);
901 spec.ip_proto = ip6->nexthdr;
902 memcpy(spec.rem_host, &ip6->saddr, sizeof(ip6->saddr));
903 memcpy(spec.loc_host, &ip6->daddr, sizeof(ip6->daddr));
904 ports = (const __be16 *)(ip6 + 1);
905 } 875 }
906 876
907 spec.rem_port = ports[0]; 877 spec.rem_port = fk.ports.src;
908 spec.loc_port = ports[1]; 878 spec.loc_port = fk.ports.dst;
909 879
910 rc = efx->type->filter_rfs_insert(efx, &spec); 880 rc = efx->type->filter_rfs_insert(efx, &spec);
911 if (rc < 0) 881 if (rc < 0)
912 return rc; 882 return rc;
913 883
914 /* Remember this so we can check whether to expire the filter later */ 884 /* Remember this so we can check whether to expire the filter later */
915 efx->rps_flow_id[rc] = flow_id; 885 channel = efx_get_channel(efx, rxq_index);
916 channel = efx_get_channel(efx, skb_get_rx_queue(skb)); 886 channel->rps_flow_id[rc] = flow_id;
917 ++channel->rfs_filters_added; 887 ++channel->rfs_filters_added;
918 888
919 if (ether_type == htons(ETH_P_IP)) 889 if (spec.ether_type == htons(ETH_P_IP))
920 netif_info(efx, rx_status, efx->net_dev, 890 netif_info(efx, rx_status, efx->net_dev,
921 "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n", 891 "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
922 (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", 892 (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
923 spec.rem_host, ntohs(ports[0]), spec.loc_host, 893 spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
924 ntohs(ports[1]), rxq_index, flow_id, rc); 894 ntohs(spec.loc_port), rxq_index, flow_id, rc);
925 else 895 else
926 netif_info(efx, rx_status, efx->net_dev, 896 netif_info(efx, rx_status, efx->net_dev,
927 "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n", 897 "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
928 (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", 898 (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
929 spec.rem_host, ntohs(ports[0]), spec.loc_host, 899 spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
930 ntohs(ports[1]), rxq_index, flow_id, rc); 900 ntohs(spec.loc_port), rxq_index, flow_id, rc);
931 901
932 return rc; 902 return rc;
933} 903}
@@ -935,24 +905,34 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
935bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota) 905bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
936{ 906{
937 bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index); 907 bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
938 unsigned int index, size; 908 unsigned int channel_idx, index, size;
939 u32 flow_id; 909 u32 flow_id;
940 910
941 if (!spin_trylock_bh(&efx->filter_lock)) 911 if (!spin_trylock_bh(&efx->filter_lock))
942 return false; 912 return false;
943 913
944 expire_one = efx->type->filter_rfs_expire_one; 914 expire_one = efx->type->filter_rfs_expire_one;
915 channel_idx = efx->rps_expire_channel;
945 index = efx->rps_expire_index; 916 index = efx->rps_expire_index;
946 size = efx->type->max_rx_ip_filters; 917 size = efx->type->max_rx_ip_filters;
947 while (quota--) { 918 while (quota--) {
948 flow_id = efx->rps_flow_id[index]; 919 struct efx_channel *channel = efx_get_channel(efx, channel_idx);
949 if (expire_one(efx, flow_id, index)) 920 flow_id = channel->rps_flow_id[index];
921
922 if (flow_id != RPS_FLOW_ID_INVALID &&
923 expire_one(efx, flow_id, index)) {
950 netif_info(efx, rx_status, efx->net_dev, 924 netif_info(efx, rx_status, efx->net_dev,
951 "expired filter %d [flow %u]\n", 925 "expired filter %d [queue %u flow %u]\n",
952 index, flow_id); 926 index, channel_idx, flow_id);
953 if (++index == size) 927 channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
928 }
929 if (++index == size) {
930 if (++channel_idx == efx->n_channels)
931 channel_idx = 0;
954 index = 0; 932 index = 0;
933 }
955 } 934 }
935 efx->rps_expire_channel = channel_idx;
956 efx->rps_expire_index = index; 936 efx->rps_expire_index = index;
957 937
958 spin_unlock_bh(&efx->filter_lock); 938 spin_unlock_bh(&efx->filter_lock);
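
__efx_filter_rfs_expire() now scans a two-level space, slot within channel and channel within NIC, resuming each pass where the previous one stopped. A compact model of that quota-bounded round-robin walk:

#include <stdio.h>

#define N_CHANNELS 3
#define SIZE 4

static unsigned int expire_channel, expire_index;

static void scan(unsigned int quota)
{
	unsigned int ch = expire_channel, idx = expire_index;

	while (quota--) {
		printf("check channel %u slot %u\n", ch, idx);
		if (++idx == SIZE) {		/* end of this channel's table */
			if (++ch == N_CHANNELS)	/* wrap after the last channel */
				ch = 0;
			idx = 0;
		}
	}
	expire_channel = ch;	/* resume here on the next pass */
	expire_index = idx;
}

int main(void)
{
	scan(5);
	scan(5);	/* picks up at channel 1, slot 1 */
	return 0;
}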
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 4f7283d05588..44da877d2483 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -156,7 +156,7 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
156 struct netdev_hw_addr *ha; 156 struct netdev_hw_addr *ha;
157 157
158 netdev_for_each_uc_addr(ha, dev) { 158 netdev_for_each_uc_addr(ha, dev) {
159 dwmac4_set_umac_addr(ioaddr, ha->addr, reg); 159 dwmac4_set_umac_addr(hw, ha->addr, reg);
160 reg++; 160 reg++;
161 } 161 }
162 } 162 }
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index eac45d0c75e2..a473c182c91d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3450,8 +3450,6 @@ int stmmac_resume(struct device *dev)
3450 if (!netif_running(ndev)) 3450 if (!netif_running(ndev))
3451 return 0; 3451 return 0;
3452 3452
3453 spin_lock_irqsave(&priv->lock, flags);
3454
3455 /* Power Down bit, into the PM register, is cleared 3453 /* Power Down bit, into the PM register, is cleared
3456 * automatically as soon as a magic packet or a Wake-up frame 3454 * automatically as soon as a magic packet or a Wake-up frame
3457 * is received. Anyway, it's better to manually clear 3455 * is received. Anyway, it's better to manually clear
@@ -3459,7 +3457,9 @@ int stmmac_resume(struct device *dev)
3459 * from other devices (e.g. serial console). 3457 * from other devices (e.g. serial console).
3460 */ 3458 */
3461 if (device_may_wakeup(priv->device)) { 3459 if (device_may_wakeup(priv->device)) {
3460 spin_lock_irqsave(&priv->lock, flags);
3462 priv->hw->mac->pmt(priv->hw, 0); 3461 priv->hw->mac->pmt(priv->hw, 0);
3462 spin_unlock_irqrestore(&priv->lock, flags);
3463 priv->irq_wake = 0; 3463 priv->irq_wake = 0;
3464 } else { 3464 } else {
3465 pinctrl_pm_select_default_state(priv->device); 3465 pinctrl_pm_select_default_state(priv->device);
@@ -3473,6 +3473,8 @@ int stmmac_resume(struct device *dev)
3473 3473
3474 netif_device_attach(ndev); 3474 netif_device_attach(ndev);
3475 3475
3476 spin_lock_irqsave(&priv->lock, flags);
3477
3476 priv->cur_rx = 0; 3478 priv->cur_rx = 0;
3477 priv->dirty_rx = 0; 3479 priv->dirty_rx = 0;
3478 priv->dirty_tx = 0; 3480 priv->dirty_tx = 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 3f83c369f56c..ec295851812b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -297,7 +297,7 @@ int stmmac_mdio_register(struct net_device *ndev)
297 return -ENOMEM; 297 return -ENOMEM;
298 298
299 if (mdio_bus_data->irqs) 299 if (mdio_bus_data->irqs)
300 memcpy(new_bus->irq, mdio_bus_data, sizeof(new_bus->irq)); 300 memcpy(new_bus->irq, mdio_bus_data->irqs, sizeof(new_bus->irq));
301 301
302#ifdef CONFIG_OF 302#ifdef CONFIG_OF
303 if (priv->device->of_node) 303 if (priv->device->of_node)
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 4b08a2f52b3e..e6bb0ecb12c7 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1339,7 +1339,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
1339 if (priv->coal_intvl != 0) { 1339 if (priv->coal_intvl != 0) {
1340 struct ethtool_coalesce coal; 1340 struct ethtool_coalesce coal;
1341 1341
1342 coal.rx_coalesce_usecs = (priv->coal_intvl << 4); 1342 coal.rx_coalesce_usecs = priv->coal_intvl;
1343 cpsw_set_coalesce(ndev, &coal); 1343 cpsw_set_coalesce(ndev, &coal);
1344 } 1344 }
1345 1345
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index a0f64cba86ba..2ace126533cd 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -990,7 +990,7 @@ static void team_port_disable(struct team *team,
990#define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ 990#define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
991 NETIF_F_RXCSUM | NETIF_F_ALL_TSO) 991 NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
992 992
993static void __team_compute_features(struct team *team) 993static void ___team_compute_features(struct team *team)
994{ 994{
995 struct team_port *port; 995 struct team_port *port;
996 u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL; 996 u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;
@@ -1021,15 +1021,20 @@ static void __team_compute_features(struct team *team)
1021 team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 1021 team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
1022 if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM)) 1022 if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
1023 team->dev->priv_flags |= IFF_XMIT_DST_RELEASE; 1023 team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
1024}
1024 1025
1026static void __team_compute_features(struct team *team)
1027{
1028 ___team_compute_features(team);
1025 netdev_change_features(team->dev); 1029 netdev_change_features(team->dev);
1026} 1030}
1027 1031
1028static void team_compute_features(struct team *team) 1032static void team_compute_features(struct team *team)
1029{ 1033{
1030 mutex_lock(&team->lock); 1034 mutex_lock(&team->lock);
1031 __team_compute_features(team); 1035 ___team_compute_features(team);
1032 mutex_unlock(&team->lock); 1036 mutex_unlock(&team->lock);
1037 netdev_change_features(team->dev);
1033} 1038}
1034 1039
1035static int team_port_enter(struct team *team, struct team_port *port) 1040static int team_port_enter(struct team *team, struct team_port *port)
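
The team change splits feature computation from notification so netdev_change_features() is never called with team->lock held; the callee can then take its own locks without risking an inversion. A userspace sketch of the same split, using a pthread mutex as a stand-in:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int features;

static void compute_locked(void)	/* caller must hold lock */
{
	features = 42;
}

static void notify_change(void)		/* may take other locks internally */
{
	printf("features now %d\n", features);
}

static void compute_features(void)
{
	pthread_mutex_lock(&lock);
	compute_locked();
	pthread_mutex_unlock(&lock);
	notify_change();	/* deliberately outside the mutex */
}

int main(void)
{
	compute_features();
	return 0;
}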
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 36cd7f016a8d..9bbe0161a2f4 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -473,7 +473,7 @@ static void read_bulk_callback(struct urb *urb)
473 goto goon; 473 goto goon;
474 } 474 }
475 475
476 if (!count || count < 4) 476 if (count < 4)
477 goto goon; 477 goto goon;
478 478
479 rx_status = buf[count - 2]; 479 rx_status = buf[count - 2];
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index d9d2806a47b1..dc989a8b5afb 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -61,6 +61,8 @@
61#define SUSPEND_ALLMODES (SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \ 61#define SUSPEND_ALLMODES (SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \
62 SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3) 62 SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3)
63 63
64#define CARRIER_CHECK_DELAY (2 * HZ)
65
64struct smsc95xx_priv { 66struct smsc95xx_priv {
65 u32 mac_cr; 67 u32 mac_cr;
66 u32 hash_hi; 68 u32 hash_hi;
@@ -69,6 +71,9 @@ struct smsc95xx_priv {
69 spinlock_t mac_cr_lock; 71 spinlock_t mac_cr_lock;
70 u8 features; 72 u8 features;
71 u8 suspend_flags; 73 u8 suspend_flags;
74 bool link_ok;
75 struct delayed_work carrier_check;
76 struct usbnet *dev;
72}; 77};
73 78
74static bool turbo_mode = true; 79static bool turbo_mode = true;
@@ -624,6 +629,44 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb)
624 intdata); 629 intdata);
625} 630}
626 631
632static void set_carrier(struct usbnet *dev, bool link)
633{
634 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
635
636 if (pdata->link_ok == link)
637 return;
638
639 pdata->link_ok = link;
640
641 if (link)
642 usbnet_link_change(dev, 1, 0);
643 else
644 usbnet_link_change(dev, 0, 0);
645}
646
647static void check_carrier(struct work_struct *work)
648{
649 struct smsc95xx_priv *pdata = container_of(work, struct smsc95xx_priv,
650 carrier_check.work);
651 struct usbnet *dev = pdata->dev;
652 int ret;
653
654 if (pdata->suspend_flags != 0)
655 return;
656
657 ret = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMSR);
658 if (ret < 0) {
659 netdev_warn(dev->net, "Failed to read MII_BMSR\n");
660 return;
661 }
662 if (ret & BMSR_LSTATUS)
663 set_carrier(dev, 1);
664 else
665 set_carrier(dev, 0);
666
667 schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
668}
669
627/* Enable or disable Tx & Rx checksum offload engines */ 670/* Enable or disable Tx & Rx checksum offload engines */
628static int smsc95xx_set_features(struct net_device *netdev, 671static int smsc95xx_set_features(struct net_device *netdev,
629 netdev_features_t features) 672 netdev_features_t features)
@@ -1165,13 +1208,20 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
1165 dev->net->flags |= IFF_MULTICAST; 1208 dev->net->flags |= IFF_MULTICAST;
1166 dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM; 1209 dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM;
1167 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; 1210 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
1211
1212 pdata->dev = dev;
1213 INIT_DELAYED_WORK(&pdata->carrier_check, check_carrier);
1214 schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
1215
1168 return 0; 1216 return 0;
1169} 1217}
1170 1218
1171static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf) 1219static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
1172{ 1220{
1173 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); 1221 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
1222
1174 if (pdata) { 1223 if (pdata) {
1224 cancel_delayed_work(&pdata->carrier_check);
1175 netif_dbg(dev, ifdown, dev->net, "free pdata\n"); 1225 netif_dbg(dev, ifdown, dev->net, "free pdata\n");
1176 kfree(pdata); 1226 kfree(pdata);
1177 pdata = NULL; 1227 pdata = NULL;
@@ -1695,6 +1745,7 @@ static int smsc95xx_resume(struct usb_interface *intf)
1695 1745
1696 /* do this first to ensure it's cleared even in error case */ 1746 /* do this first to ensure it's cleared even in error case */
1697 pdata->suspend_flags = 0; 1747 pdata->suspend_flags = 0;
1748 schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
1698 1749
1699 if (suspend_flags & SUSPEND_ALLMODES) { 1750 if (suspend_flags & SUSPEND_ALLMODES) {
1700 /* clear wake-up sources */ 1751 /* clear wake-up sources */
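
smsc95xx gains a self-rescheduling delayed work that polls MII_BMSR every two seconds and reports only link transitions; set_carrier() returns early when nothing changed. A userspace model of that edge-triggered polling, with read_bmsr() as a hypothetical stand-in for the MDIO read:

#include <stdbool.h>
#include <stdio.h>

static bool link_ok;

static bool read_bmsr(int tick)	/* pretend link comes up at tick 3 */
{
	return tick >= 3;
}

static void set_carrier(bool link)
{
	if (link_ok == link)
		return;		/* edge-triggered: no duplicate events */
	link_ok = link;
	printf("link %s\n", link ? "up" : "down");
}

int main(void)
{
	/* Stands in for the self-rescheduling delayed work. */
	for (int tick = 0; tick < 6; tick++)
		set_carrier(read_bmsr(tick));
	return 0;
}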
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 49d84e540343..e0638e556fe7 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1925,24 +1925,11 @@ static int virtnet_probe(struct virtio_device *vdev)
1925 1925
1926 virtio_device_ready(vdev); 1926 virtio_device_ready(vdev);
1927 1927
1928 /* Last of all, set up some receive buffers. */
1929 for (i = 0; i < vi->curr_queue_pairs; i++) {
1930 try_fill_recv(vi, &vi->rq[i], GFP_KERNEL);
1931
1932 /* If we didn't even get one input buffer, we're useless. */
1933 if (vi->rq[i].vq->num_free ==
1934 virtqueue_get_vring_size(vi->rq[i].vq)) {
1935 free_unused_bufs(vi);
1936 err = -ENOMEM;
1937 goto free_recv_bufs;
1938 }
1939 }
1940
1941 vi->nb.notifier_call = &virtnet_cpu_callback; 1928 vi->nb.notifier_call = &virtnet_cpu_callback;
1942 err = register_hotcpu_notifier(&vi->nb); 1929 err = register_hotcpu_notifier(&vi->nb);
1943 if (err) { 1930 if (err) {
1944 pr_debug("virtio_net: registering cpu notifier failed\n"); 1931 pr_debug("virtio_net: registering cpu notifier failed\n");
1945 goto free_recv_bufs; 1932 goto free_unregister_netdev;
1946 } 1933 }
1947 1934
1948 /* Assume link up if device can't report link status, 1935 /* Assume link up if device can't report link status,
@@ -1960,10 +1947,9 @@ static int virtnet_probe(struct virtio_device *vdev)
1960 1947
1961 return 0; 1948 return 0;
1962 1949
1963free_recv_bufs: 1950free_unregister_netdev:
1964 vi->vdev->config->reset(vdev); 1951 vi->vdev->config->reset(vdev);
1965 1952
1966 free_receive_bufs(vi);
1967 unregister_netdev(dev); 1953 unregister_netdev(dev);
1968free_vqs: 1954free_vqs:
1969 cancel_delayed_work_sync(&vi->refill); 1955 cancel_delayed_work_sync(&vi->refill);
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index db8022ae415b..08885bc8d6db 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1369,7 +1369,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1369 rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd; 1369 rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
1370 1370
1371 segCnt = rcdlro->segCnt; 1371 segCnt = rcdlro->segCnt;
1372 BUG_ON(segCnt <= 1); 1372 WARN_ON_ONCE(segCnt == 0);
1373 mss = rcdlro->mss; 1373 mss = rcdlro->mss;
1374 if (unlikely(segCnt <= 1)) 1374 if (unlikely(segCnt <= 1))
1375 segCnt = 0; 1375 segCnt = 0;
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index c4825392d64b..3d2b64e63408 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
69/* 69/*
70 * Version numbers 70 * Version numbers
71 */ 71 */
72#define VMXNET3_DRIVER_VERSION_STRING "1.4.7.0-k" 72#define VMXNET3_DRIVER_VERSION_STRING "1.4.8.0-k"
73 73
 74/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */ 74/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
75#define VMXNET3_DRIVER_VERSION_NUM 0x01040700 75#define VMXNET3_DRIVER_VERSION_NUM 0x01040800
76 76
77#if defined(CONFIG_PCI_MSI) 77#if defined(CONFIG_PCI_MSI)
78 /* RSS only makes sense if MSI-X is supported. */ 78 /* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 8ff30c3bdfce..f999db2f97b4 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -3086,6 +3086,9 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
3086 if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL]) 3086 if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
3087 conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL; 3087 conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL;
3088 3088
3089 if (tb[IFLA_MTU])
3090 conf.mtu = nla_get_u32(tb[IFLA_MTU]);
3091
3089 err = vxlan_dev_configure(src_net, dev, &conf); 3092 err = vxlan_dev_configure(src_net, dev, &conf);
3090 switch (err) { 3093 switch (err) {
3091 case -ENODEV: 3094 case -ENODEV:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index d0631b6cfd53..62f475e31077 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -2540,12 +2540,14 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
2540 const u8 *mac, struct station_info *sinfo) 2540 const u8 *mac, struct station_info *sinfo)
2541{ 2541{
2542 struct brcmf_if *ifp = netdev_priv(ndev); 2542 struct brcmf_if *ifp = netdev_priv(ndev);
2543 struct brcmf_scb_val_le scb_val;
2543 s32 err = 0; 2544 s32 err = 0;
2544 struct brcmf_sta_info_le sta_info_le; 2545 struct brcmf_sta_info_le sta_info_le;
2545 u32 sta_flags; 2546 u32 sta_flags;
2546 u32 is_tdls_peer; 2547 u32 is_tdls_peer;
2547 s32 total_rssi; 2548 s32 total_rssi;
2548 s32 count_rssi; 2549 s32 count_rssi;
2550 int rssi;
2549 u32 i; 2551 u32 i;
2550 2552
2551 brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac); 2553 brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac);
@@ -2629,6 +2631,20 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
2629 sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); 2631 sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
2630 total_rssi /= count_rssi; 2632 total_rssi /= count_rssi;
2631 sinfo->signal = total_rssi; 2633 sinfo->signal = total_rssi;
2634 } else if (test_bit(BRCMF_VIF_STATUS_CONNECTED,
2635 &ifp->vif->sme_state)) {
2636 memset(&scb_val, 0, sizeof(scb_val));
2637 err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_RSSI,
2638 &scb_val, sizeof(scb_val));
2639 if (err) {
2640 brcmf_err("Could not get rssi (%d)\n", err);
2641 goto done;
2642 } else {
2643 rssi = le32_to_cpu(scb_val.val);
2644 sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
2645 sinfo->signal = rssi;
2646 brcmf_dbg(CONN, "RSSI %d dBm\n", rssi);
2647 }
2632 } 2648 }
2633 } 2649 }
2634done: 2650done:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index 68f1ce02f4bf..2b9a2bc429d6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -1157,6 +1157,8 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
1157 brcmu_pkt_buf_free_skb(skb); 1157 brcmu_pkt_buf_free_skb(skb);
1158 return; 1158 return;
1159 } 1159 }
1160
1161 skb->protocol = eth_type_trans(skb, ifp->ndev);
1160 brcmf_netif_rx(ifp, skb); 1162 brcmf_netif_rx(ifp, skb);
1161} 1163}
1162 1164
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 9ed0ed1bf514..4dd5adcdd29b 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2776,6 +2776,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
2776 if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] || 2776 if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] ||
2777 !info->attrs[HWSIM_ATTR_FLAGS] || 2777 !info->attrs[HWSIM_ATTR_FLAGS] ||
2778 !info->attrs[HWSIM_ATTR_COOKIE] || 2778 !info->attrs[HWSIM_ATTR_COOKIE] ||
2779 !info->attrs[HWSIM_ATTR_SIGNAL] ||
2779 !info->attrs[HWSIM_ATTR_TX_INFO]) 2780 !info->attrs[HWSIM_ATTR_TX_INFO])
2780 goto out; 2781 goto out;
2781 2782
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index 0f48048b8654..3a0faa8fe9d4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -54,7 +54,7 @@ EXPORT_SYMBOL(channel5g_80m);
54void rtl_addr_delay(u32 addr) 54void rtl_addr_delay(u32 addr)
55{ 55{
56 if (addr == 0xfe) 56 if (addr == 0xfe)
57 msleep(50); 57 mdelay(50);
58 else if (addr == 0xfd) 58 else if (addr == 0xfd)
59 msleep(5); 59 msleep(5);
60 else if (addr == 0xfc) 60 else if (addr == 0xfc)
@@ -75,7 +75,7 @@ void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr,
75 rtl_addr_delay(addr); 75 rtl_addr_delay(addr);
76 } else { 76 } else {
77 rtl_set_rfreg(hw, rfpath, addr, mask, data); 77 rtl_set_rfreg(hw, rfpath, addr, mask, data);
78 usleep_range(1, 2); 78 udelay(1);
79 } 79 }
80} 80}
81EXPORT_SYMBOL(rtl_rfreg_delay); 81EXPORT_SYMBOL(rtl_rfreg_delay);
@@ -86,7 +86,7 @@ void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data)
86 rtl_addr_delay(addr); 86 rtl_addr_delay(addr);
87 } else { 87 } else {
88 rtl_set_bbreg(hw, addr, MASKDWORD, data); 88 rtl_set_bbreg(hw, addr, MASKDWORD, data);
89 usleep_range(1, 2); 89 udelay(1);
90 } 90 }
91} 91}
92EXPORT_SYMBOL(rtl_bb_delay); 92EXPORT_SYMBOL(rtl_bb_delay);
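
The rtlwifi hunks swap msleep()/usleep_range() for mdelay()/udelay() because these helpers can run in atomic context, where sleeping is forbidden; the busy-wait variants burn cycles but never schedule. A loose userspace analogy of the two delay styles:

#include <stdio.h>
#include <time.h>

static void sleep_ms(long ms)	/* analogous to msleep(): may block */
{
	struct timespec ts = { ms / 1000, (ms % 1000) * 1000000L };
	nanosleep(&ts, NULL);
}

static void busy_ms(long ms)	/* analogous to mdelay(): never sleeps */
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	do {
		clock_gettime(CLOCK_MONOTONIC, &now);
	} while ((now.tv_sec - start.tv_sec) * 1000L +
		 (now.tv_nsec - start.tv_nsec) / 1000000L < ms);
}

int main(void)
{
	sleep_ms(50);
	busy_ms(50);
	printf("both delays done\n");
	return 0;
}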
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 78dca3193ca4..befac5b19490 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1679,9 +1679,14 @@ static int nvme_pci_enable(struct nvme_dev *dev)
1679 1679
1680static void nvme_dev_unmap(struct nvme_dev *dev) 1680static void nvme_dev_unmap(struct nvme_dev *dev)
1681{ 1681{
1682 struct pci_dev *pdev = to_pci_dev(dev->dev);
1683 int bars;
1684
1682 if (dev->bar) 1685 if (dev->bar)
1683 iounmap(dev->bar); 1686 iounmap(dev->bar);
1684 pci_release_regions(to_pci_dev(dev->dev)); 1687
1688 bars = pci_select_bars(pdev, IORESOURCE_MEM);
1689 pci_release_selected_regions(pdev, bars);
1685} 1690}
1686 1691
1687static void nvme_pci_disable(struct nvme_dev *dev) 1692static void nvme_pci_disable(struct nvme_dev *dev)
@@ -1924,7 +1929,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
1924 1929
1925 return 0; 1930 return 0;
1926 release: 1931 release:
1927 pci_release_regions(pdev); 1932 pci_release_selected_regions(pdev, bars);
1928 return -ENODEV; 1933 return -ENODEV;
1929} 1934}
1930 1935
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 14f2f8c7c260..33daffc4392c 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -395,7 +395,7 @@ static int unflatten_dt_nodes(const void *blob,
395 struct device_node **nodepp) 395 struct device_node **nodepp)
396{ 396{
397 struct device_node *root; 397 struct device_node *root;
398 int offset = 0, depth = 0; 398 int offset = 0, depth = 0, initial_depth = 0;
399#define FDT_MAX_DEPTH 64 399#define FDT_MAX_DEPTH 64
400 unsigned int fpsizes[FDT_MAX_DEPTH]; 400 unsigned int fpsizes[FDT_MAX_DEPTH];
401 struct device_node *nps[FDT_MAX_DEPTH]; 401 struct device_node *nps[FDT_MAX_DEPTH];
@@ -405,11 +405,22 @@ static int unflatten_dt_nodes(const void *blob,
405 if (nodepp) 405 if (nodepp)
406 *nodepp = NULL; 406 *nodepp = NULL;
407 407
408 /*
409 * We're unflattening a device sub-tree if @dad is valid. There are
410 * possibly multiple nodes in the first level of depth. We need to
411 * set @depth to 1 to make fdt_next_node() happy as it bails
412 * immediately when negative @depth is found. Otherwise, the device
413 * nodes except the first one won't be unflattened successfully.
414 */
415 if (dad)
416 depth = initial_depth = 1;
417
408 root = dad; 418 root = dad;
409 fpsizes[depth] = dad ? strlen(of_node_full_name(dad)) : 0; 419 fpsizes[depth] = dad ? strlen(of_node_full_name(dad)) : 0;
410 nps[depth] = dad; 420 nps[depth] = dad;
421
411 for (offset = 0; 422 for (offset = 0;
412 offset >= 0 && depth >= 0; 423 offset >= 0 && depth >= initial_depth;
413 offset = fdt_next_node(blob, offset, &depth)) { 424 offset = fdt_next_node(blob, offset, &depth)) {
414 if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH)) 425 if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH))
415 continue; 426 continue;
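
Per the comment added above, a sub-tree walk reports its top-level siblings at depth 1, and the loop must keep going while depth >= initial_depth rather than bailing the moment depth drops toward 0. A toy walk over a hypothetical depth sequence:

#include <stdio.h>

int main(void)
{
	/* Three siblings, one with a child, then the step out of the
	 * sub-tree. With initial_depth = 1 the loop visits all four
	 * nodes and stops exactly on the step out; the old code started
	 * at depth 0, so the iterator went negative after the first
	 * node and the remaining siblings were never unflattened.
	 */
	int depth[] = { 1, 2, 1, 1, 0 };
	int initial_depth = 1, i, nodes = 0;

	for (i = 0; i < 5 && depth[i] >= initial_depth; i++)
		nodes++;
	printf("%d nodes unflattened\n", nodes);	/* 4 */
	return 0;
}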
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index e7bfc175b8e1..6ec743faabe8 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -386,13 +386,13 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
386EXPORT_SYMBOL_GPL(of_irq_to_resource); 386EXPORT_SYMBOL_GPL(of_irq_to_resource);
387 387
388/** 388/**
389 * of_irq_get - Decode a node's IRQ and return it as a Linux irq number 389 * of_irq_get - Decode a node's IRQ and return it as a Linux IRQ number
390 * @dev: pointer to device tree node 390 * @dev: pointer to device tree node
391 * @index: zero-based index of the irq 391 * @index: zero-based index of the IRQ
392 *
393 * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain
394 * is not yet created.
395 * 392 *
393 * Returns Linux IRQ number on success, or 0 on the IRQ mapping failure, or
394 * -EPROBE_DEFER if the IRQ domain is not yet created, or error code in case
395 * of any other failure.
396 */ 396 */
397int of_irq_get(struct device_node *dev, int index) 397int of_irq_get(struct device_node *dev, int index)
398{ 398{
@@ -413,12 +413,13 @@ int of_irq_get(struct device_node *dev, int index)
413EXPORT_SYMBOL_GPL(of_irq_get); 413EXPORT_SYMBOL_GPL(of_irq_get);
414 414
415/** 415/**
416 * of_irq_get_byname - Decode a node's IRQ and return it as a Linux irq number 416 * of_irq_get_byname - Decode a node's IRQ and return it as a Linux IRQ number
417 * @dev: pointer to device tree node 417 * @dev: pointer to device tree node
418 * @name: irq name 418 * @name: IRQ name
419 * 419 *
420 * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain 420 * Returns Linux IRQ number on success, or 0 on the IRQ mapping failure, or
421 * is not yet created, or error code in case of any other failure. 421 * -EPROBE_DEFER if the IRQ domain is not yet created, or error code in case
422 * of any other failure.
422 */ 423 */
423int of_irq_get_byname(struct device_node *dev, const char *name) 424int of_irq_get_byname(struct device_node *dev, const char *name)
424{ 425{
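
The reworked kernel-doc makes the contract three-way: a positive Linux IRQ number, 0 when the mapping itself fails, and a negative errno (notably -EPROBE_DEFER) otherwise. A hedged sketch of how a caller would consume that; get_my_irq() is a made-up helper:

#include <linux/errno.h>
#include <linux/of_irq.h>

/* Sketch: fetch IRQ @index of @np, honouring the documented returns. */
static int get_my_irq(struct device_node *np, int index)
{
        int irq = of_irq_get(np, index);

        if (irq == -EPROBE_DEFER)
                return irq;     /* domain not ready: retry probe later */
        if (irq < 0)
                return irq;     /* hard error */
        if (irq == 0)
                return -ENXIO;  /* mapping failed, no errno to pass on */

        return irq;             /* valid Linux IRQ number */
}
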
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index ed01c0172e4a..216648233874 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -127,8 +127,15 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
127 } 127 }
128 128
 129 /* Need to adjust the alignment to satisfy the CMA requirement */ 129 /* Need to adjust the alignment to satisfy the CMA requirement */
130 if (IS_ENABLED(CONFIG_CMA) && of_flat_dt_is_compatible(node, "shared-dma-pool")) 130 if (IS_ENABLED(CONFIG_CMA)
131 align = max(align, (phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order)); 131 && of_flat_dt_is_compatible(node, "shared-dma-pool")
132 && of_get_flat_dt_prop(node, "reusable", NULL)
133 && !of_get_flat_dt_prop(node, "no-map", NULL)) {
134 unsigned long order =
135 max_t(unsigned long, MAX_ORDER - 1, pageblock_order);
136
137 align = max(align, (phys_addr_t)PAGE_SIZE << order);
138 }
132 139
133 prop = of_get_flat_dt_prop(node, "alloc-ranges", &len); 140 prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
134 if (prop) { 141 if (prop) {
diff --git a/drivers/pci/vc.c b/drivers/pci/vc.c
index dfbab61a1b47..1fa3a3219c45 100644
--- a/drivers/pci/vc.c
+++ b/drivers/pci/vc.c
@@ -221,9 +221,9 @@ static int pci_vc_do_save_buffer(struct pci_dev *dev, int pos,
221 else 221 else
222 pci_write_config_word(dev, pos + PCI_VC_PORT_CTRL, 222 pci_write_config_word(dev, pos + PCI_VC_PORT_CTRL,
223 *(u16 *)buf); 223 *(u16 *)buf);
224 buf += 2; 224 buf += 4;
225 } 225 }
226 len += 2; 226 len += 4;
227 227
228 /* 228 /*
229 * If we have any Low Priority VCs and a VC Arbitration Table Offset 229 * If we have any Low Priority VCs and a VC Arbitration Table Offset
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index f2d01d4d9364..140436a046c0 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -950,17 +950,14 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
950 950
951 /* For SPIs, we need to track the affinity per IRQ */ 951 /* For SPIs, we need to track the affinity per IRQ */
952 if (using_spi) { 952 if (using_spi) {
953 if (i >= pdev->num_resources) { 953 if (i >= pdev->num_resources)
954 of_node_put(dn);
955 break; 954 break;
956 }
957 955
958 irqs[i] = cpu; 956 irqs[i] = cpu;
959 } 957 }
960 958
961 /* Keep track of the CPUs containing this PMU type */ 959 /* Keep track of the CPUs containing this PMU type */
962 cpumask_set_cpu(cpu, &pmu->supported_cpus); 960 cpumask_set_cpu(cpu, &pmu->supported_cpus);
963 of_node_put(dn);
964 i++; 961 i++;
965 } while (1); 962 } while (1);
966 963
@@ -995,9 +992,6 @@ int arm_pmu_device_probe(struct platform_device *pdev,
995 992
996 armpmu_init(pmu); 993 armpmu_init(pmu);
997 994
998 if (!__oprofile_cpu_pmu)
999 __oprofile_cpu_pmu = pmu;
1000
1001 pmu->plat_device = pdev; 995 pmu->plat_device = pdev;
1002 996
1003 if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) { 997 if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
@@ -1016,8 +1010,8 @@ int arm_pmu_device_probe(struct platform_device *pdev,
1016 if (!ret) 1010 if (!ret)
1017 ret = init_fn(pmu); 1011 ret = init_fn(pmu);
1018 } else { 1012 } else {
1019 ret = probe_current_pmu(pmu, probe_table);
1020 cpumask_setall(&pmu->supported_cpus); 1013 cpumask_setall(&pmu->supported_cpus);
1014 ret = probe_current_pmu(pmu, probe_table);
1021 } 1015 }
1022 1016
1023 if (ret) { 1017 if (ret) {
@@ -1033,6 +1027,9 @@ int arm_pmu_device_probe(struct platform_device *pdev,
1033 if (ret) 1027 if (ret)
1034 goto out_destroy; 1028 goto out_destroy;
1035 1029
1030 if (!__oprofile_cpu_pmu)
1031 __oprofile_cpu_pmu = pmu;
1032
1036 pr_info("enabled with %s PMU driver, %d counters available\n", 1033 pr_info("enabled with %s PMU driver, %d counters available\n",
1037 pmu->name, pmu->num_events); 1034 pmu->name, pmu->num_events);
1038 1035
@@ -1043,6 +1040,7 @@ out_destroy:
1043out_free: 1040out_free:
1044 pr_info("%s: failed to register PMU devices!\n", 1041 pr_info("%s: failed to register PMU devices!\n",
1045 of_node_full_name(node)); 1042 of_node_full_name(node));
1043 kfree(pmu->irq_affinity);
1046 kfree(pmu); 1044 kfree(pmu);
1047 return ret; 1045 return ret;
1048} 1046}
diff --git a/drivers/phy/phy-exynos-mipi-video.c b/drivers/phy/phy-exynos-mipi-video.c
index cc093ebfda94..8b851f718123 100644
--- a/drivers/phy/phy-exynos-mipi-video.c
+++ b/drivers/phy/phy-exynos-mipi-video.c
@@ -233,8 +233,12 @@ static inline int __is_running(const struct exynos_mipi_phy_desc *data,
233 struct exynos_mipi_video_phy *state) 233 struct exynos_mipi_video_phy *state)
234{ 234{
235 u32 val; 235 u32 val;
236 int ret;
237
238 ret = regmap_read(state->regmaps[data->resetn_map], data->resetn_reg, &val);
239 if (ret)
240 return 0;
236 241
237 regmap_read(state->regmaps[data->resetn_map], data->resetn_reg, &val);
238 return val & data->resetn_val; 242 return val & data->resetn_val;
239} 243}
240 244
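
Before this fix, a failed regmap_read() left @val uninitialized and the stale stack value was still ANDed with the mask. The fix treats a failed read as "PHY not running". A minimal sketch of the pattern; names are made up:

#include <linux/regmap.h>

/* Sketch: read a status bit; a failed read must not consume @val. */
static bool status_bit_set(struct regmap *map, unsigned int reg,
                           unsigned int bit)
{
        unsigned int val;

        if (regmap_read(map, reg, &val))
                return false;   /* report "clear" rather than read garbage */

        return val & bit;
}
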
diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c
index 0a477d24cf76..bf46844dc387 100644
--- a/drivers/phy/phy-ti-pipe3.c
+++ b/drivers/phy/phy-ti-pipe3.c
@@ -293,11 +293,18 @@ static int ti_pipe3_init(struct phy *x)
293 ret = ti_pipe3_dpll_wait_lock(phy); 293 ret = ti_pipe3_dpll_wait_lock(phy);
294 } 294 }
295 295
296 /* Program the DPLL only if not locked */ 296 /* SATA has issues if re-programmed when locked */
297 val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS); 297 val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS);
298 if (!(val & PLL_LOCK)) 298 if ((val & PLL_LOCK) && of_device_is_compatible(phy->dev->of_node,
299 if (ti_pipe3_dpll_program(phy)) 299 "ti,phy-pipe3-sata"))
300 return -EINVAL; 300 return ret;
301
302 /* Program the DPLL */
303 ret = ti_pipe3_dpll_program(phy);
304 if (ret) {
305 ti_pipe3_disable_clocks(phy);
306 return -EINVAL;
307 }
301 308
302 return ret; 309 return ret;
303} 310}
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
index 6b6af6cba454..d9b10a39a2cf 100644
--- a/drivers/phy/phy-twl4030-usb.c
+++ b/drivers/phy/phy-twl4030-usb.c
@@ -463,7 +463,8 @@ static int twl4030_phy_power_on(struct phy *phy)
463 twl4030_usb_set_mode(twl, twl->usb_mode); 463 twl4030_usb_set_mode(twl, twl->usb_mode);
464 if (twl->usb_mode == T2_USB_MODE_ULPI) 464 if (twl->usb_mode == T2_USB_MODE_ULPI)
465 twl4030_i2c_access(twl, 0); 465 twl4030_i2c_access(twl, 0);
466 schedule_delayed_work(&twl->id_workaround_work, 0); 466 twl->linkstat = MUSB_UNKNOWN;
467 schedule_delayed_work(&twl->id_workaround_work, HZ);
467 468
468 return 0; 469 return 0;
469} 470}
@@ -537,6 +538,7 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
537 struct twl4030_usb *twl = _twl; 538 struct twl4030_usb *twl = _twl;
538 enum musb_vbus_id_status status; 539 enum musb_vbus_id_status status;
539 bool status_changed = false; 540 bool status_changed = false;
541 int err;
540 542
541 status = twl4030_usb_linkstat(twl); 543 status = twl4030_usb_linkstat(twl);
542 544
@@ -567,7 +569,9 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
567 pm_runtime_mark_last_busy(twl->dev); 569 pm_runtime_mark_last_busy(twl->dev);
568 pm_runtime_put_autosuspend(twl->dev); 570 pm_runtime_put_autosuspend(twl->dev);
569 } 571 }
570 musb_mailbox(status); 572 err = musb_mailbox(status);
573 if (err)
574 twl->linkstat = MUSB_UNKNOWN;
571 } 575 }
572 576
573 /* don't schedule during sleep - irq works right then */ 577 /* don't schedule during sleep - irq works right then */
@@ -595,7 +599,8 @@ static int twl4030_phy_init(struct phy *phy)
595 struct twl4030_usb *twl = phy_get_drvdata(phy); 599 struct twl4030_usb *twl = phy_get_drvdata(phy);
596 600
597 pm_runtime_get_sync(twl->dev); 601 pm_runtime_get_sync(twl->dev);
598 schedule_delayed_work(&twl->id_workaround_work, 0); 602 twl->linkstat = MUSB_UNKNOWN;
603 schedule_delayed_work(&twl->id_workaround_work, HZ);
599 pm_runtime_mark_last_busy(twl->dev); 604 pm_runtime_mark_last_busy(twl->dev);
600 pm_runtime_put_autosuspend(twl->dev); 605 pm_runtime_put_autosuspend(twl->dev);
601 606
@@ -763,7 +768,8 @@ static int twl4030_usb_remove(struct platform_device *pdev)
763 if (cable_present(twl->linkstat)) 768 if (cable_present(twl->linkstat))
764 pm_runtime_put_noidle(twl->dev); 769 pm_runtime_put_noidle(twl->dev);
765 pm_runtime_mark_last_busy(twl->dev); 770 pm_runtime_mark_last_busy(twl->dev);
766 pm_runtime_put_sync_suspend(twl->dev); 771 pm_runtime_dont_use_autosuspend(&pdev->dev);
772 pm_runtime_put_sync(twl->dev);
767 pm_runtime_disable(twl->dev); 773 pm_runtime_disable(twl->dev);
768 774
769 /* autogate 60MHz ULPI clock, 775 /* autogate 60MHz ULPI clock,
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index 207b13b618cf..a607655d7830 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -1256,9 +1256,10 @@ static void mtk_eint_irq_handler(struct irq_desc *desc)
1256 const struct mtk_desc_pin *pin; 1256 const struct mtk_desc_pin *pin;
1257 1257
1258 chained_irq_enter(chip, desc); 1258 chained_irq_enter(chip, desc);
1259 for (eint_num = 0; eint_num < pctl->devdata->ap_num; eint_num += 32) { 1259 for (eint_num = 0;
1260 eint_num < pctl->devdata->ap_num;
1261 eint_num += 32, reg += 4) {
1260 status = readl(reg); 1262 status = readl(reg);
1261 reg += 4;
1262 while (status) { 1263 while (status) {
1263 offset = __ffs(status); 1264 offset = __ffs(status);
1264 index = eint_num + offset; 1265 index = eint_num + offset;
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index ccbfc325c778..38faceff2f08 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -854,7 +854,7 @@ static int nmk_gpio_get_dir(struct gpio_chip *chip, unsigned offset)
854 854
855 clk_enable(nmk_chip->clk); 855 clk_enable(nmk_chip->clk);
856 856
857 dir = !!(readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset)); 857 dir = !(readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset));
858 858
859 clk_disable(nmk_chip->clk); 859 clk_disable(nmk_chip->clk);
860 860
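
In the Nomadik DIR register a set bit means "output", while gpiolib's .get_direction callback (as of this kernel) must return 1 for input and 0 for output; the double negation preserved the hardware polarity instead of inverting it. A sketch of the convention with a hypothetical register layout:

#include <linux/bitops.h>
#include <linux/io.h>

#define MY_GPIO_DIR     0x08    /* hypothetical: bit set = line is output */

/* Sketch: gpiolib's .get_direction wants 1 = input, 0 = output. */
static int my_gpio_get_dir(void __iomem *base, unsigned int offset)
{
        u32 dir = readl(base + MY_GPIO_DIR);

        return !(dir & BIT(offset));    /* single !, inverting the polarity */
}
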
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index c06bb85c2839..3ec0025d19e7 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -103,7 +103,6 @@ config DELL_SMBIOS
103 103
104config DELL_LAPTOP 104config DELL_LAPTOP
105 tristate "Dell Laptop Extras" 105 tristate "Dell Laptop Extras"
106 depends on X86
107 depends on DELL_SMBIOS 106 depends on DELL_SMBIOS
108 depends on DMI 107 depends on DMI
109 depends on BACKLIGHT_CLASS_DEVICE 108 depends on BACKLIGHT_CLASS_DEVICE
@@ -505,7 +504,7 @@ config THINKPAD_ACPI_HOTKEY_POLL
505 504
506config SENSORS_HDAPS 505config SENSORS_HDAPS
507 tristate "Thinkpad Hard Drive Active Protection System (hdaps)" 506 tristate "Thinkpad Hard Drive Active Protection System (hdaps)"
508 depends on INPUT && X86 507 depends on INPUT
509 select INPUT_POLLDEV 508 select INPUT_POLLDEV
510 default n 509 default n
511 help 510 help
@@ -749,7 +748,7 @@ config TOSHIBA_WMI
749 748
750config ACPI_CMPC 749config ACPI_CMPC
751 tristate "CMPC Laptop Extras" 750 tristate "CMPC Laptop Extras"
752 depends on X86 && ACPI 751 depends on ACPI
753 depends on RFKILL || RFKILL=n 752 depends on RFKILL || RFKILL=n
754 select INPUT 753 select INPUT
755 select BACKLIGHT_CLASS_DEVICE 754 select BACKLIGHT_CLASS_DEVICE
@@ -848,7 +847,7 @@ config INTEL_IMR
848 847
849config INTEL_PMC_CORE 848config INTEL_PMC_CORE
850 bool "Intel PMC Core driver" 849 bool "Intel PMC Core driver"
851 depends on X86 && PCI 850 depends on PCI
852 ---help--- 851 ---help---
853 The Intel Platform Controller Hub for Intel Core SoCs provides access 852 The Intel Platform Controller Hub for Intel Core SoCs provides access
854 to Power Management Controller registers via a PCI interface. This 853 to Power Management Controller registers via a PCI interface. This
@@ -860,7 +859,7 @@ config INTEL_PMC_CORE
860 859
861config IBM_RTL 860config IBM_RTL
862 tristate "Device driver to enable PRTL support" 861 tristate "Device driver to enable PRTL support"
863 depends on X86 && PCI 862 depends on PCI
864 ---help--- 863 ---help---
865 Enable support for IBM Premium Real Time Mode (PRTM). 864 Enable support for IBM Premium Real Time Mode (PRTM).
866 This module will allow you the enter and exit PRTM in the BIOS via 865 This module will allow you the enter and exit PRTM in the BIOS via
@@ -894,7 +893,6 @@ config XO15_EBOOK
894 893
895config SAMSUNG_LAPTOP 894config SAMSUNG_LAPTOP
896 tristate "Samsung Laptop driver" 895 tristate "Samsung Laptop driver"
897 depends on X86
898 depends on RFKILL || RFKILL = n 896 depends on RFKILL || RFKILL = n
899 depends on ACPI_VIDEO || ACPI_VIDEO = n 897 depends on ACPI_VIDEO || ACPI_VIDEO = n
900 depends on BACKLIGHT_CLASS_DEVICE 898 depends on BACKLIGHT_CLASS_DEVICE
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 4a23fbc66b71..d1a091b93192 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -567,6 +567,7 @@ static void ideapad_sysfs_exit(struct ideapad_private *priv)
567static const struct key_entry ideapad_keymap[] = { 567static const struct key_entry ideapad_keymap[] = {
568 { KE_KEY, 6, { KEY_SWITCHVIDEOMODE } }, 568 { KE_KEY, 6, { KEY_SWITCHVIDEOMODE } },
569 { KE_KEY, 7, { KEY_CAMERA } }, 569 { KE_KEY, 7, { KEY_CAMERA } },
570 { KE_KEY, 8, { KEY_MICMUTE } },
570 { KE_KEY, 11, { KEY_F16 } }, 571 { KE_KEY, 11, { KEY_F16 } },
571 { KE_KEY, 13, { KEY_WLAN } }, 572 { KE_KEY, 13, { KEY_WLAN } },
572 { KE_KEY, 16, { KEY_PROG1 } }, 573 { KE_KEY, 16, { KEY_PROG1 } },
@@ -809,6 +810,7 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
809 break; 810 break;
810 case 13: 811 case 13:
811 case 11: 812 case 11:
813 case 8:
812 case 7: 814 case 7:
813 case 6: 815 case 6:
814 ideapad_input_report(priv, vpc_bit); 816 ideapad_input_report(priv, vpc_bit);
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index c3bfa1fe95bf..b65ce7519411 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -2043,6 +2043,7 @@ static int hotkey_autosleep_ack;
2043 2043
2044static u32 hotkey_orig_mask; /* events the BIOS had enabled */ 2044static u32 hotkey_orig_mask; /* events the BIOS had enabled */
2045static u32 hotkey_all_mask; /* all events supported in fw */ 2045static u32 hotkey_all_mask; /* all events supported in fw */
2046static u32 hotkey_adaptive_all_mask; /* all adaptive events supported in fw */
2046static u32 hotkey_reserved_mask; /* events better left disabled */ 2047static u32 hotkey_reserved_mask; /* events better left disabled */
2047static u32 hotkey_driver_mask; /* events needed by the driver */ 2048static u32 hotkey_driver_mask; /* events needed by the driver */
2048static u32 hotkey_user_mask; /* events visible to userspace */ 2049static u32 hotkey_user_mask; /* events visible to userspace */
@@ -2742,6 +2743,17 @@ static ssize_t hotkey_all_mask_show(struct device *dev,
2742 2743
2743static DEVICE_ATTR_RO(hotkey_all_mask); 2744static DEVICE_ATTR_RO(hotkey_all_mask);
2744 2745
2746/* sysfs hotkey all_mask ----------------------------------------------- */
2747static ssize_t hotkey_adaptive_all_mask_show(struct device *dev,
2748 struct device_attribute *attr,
2749 char *buf)
2750{
2751 return snprintf(buf, PAGE_SIZE, "0x%08x\n",
2752 hotkey_adaptive_all_mask | hotkey_source_mask);
2753}
2754
2755static DEVICE_ATTR_RO(hotkey_adaptive_all_mask);
2756
2745/* sysfs hotkey recommended_mask --------------------------------------- */ 2757/* sysfs hotkey recommended_mask --------------------------------------- */
2746static ssize_t hotkey_recommended_mask_show(struct device *dev, 2758static ssize_t hotkey_recommended_mask_show(struct device *dev,
2747 struct device_attribute *attr, 2759 struct device_attribute *attr,
@@ -2985,6 +2997,7 @@ static struct attribute *hotkey_attributes[] __initdata = {
2985 &dev_attr_wakeup_hotunplug_complete.attr, 2997 &dev_attr_wakeup_hotunplug_complete.attr,
2986 &dev_attr_hotkey_mask.attr, 2998 &dev_attr_hotkey_mask.attr,
2987 &dev_attr_hotkey_all_mask.attr, 2999 &dev_attr_hotkey_all_mask.attr,
3000 &dev_attr_hotkey_adaptive_all_mask.attr,
2988 &dev_attr_hotkey_recommended_mask.attr, 3001 &dev_attr_hotkey_recommended_mask.attr,
2989#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL 3002#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
2990 &dev_attr_hotkey_source_mask.attr, 3003 &dev_attr_hotkey_source_mask.attr,
@@ -3321,20 +3334,6 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3321 if (!tp_features.hotkey) 3334 if (!tp_features.hotkey)
3322 return 1; 3335 return 1;
3323 3336
3324 /*
3325 * Check if we have an adaptive keyboard, like on the
3326 * Lenovo Carbon X1 2014 (2nd Gen).
3327 */
3328 if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
3329 if ((hkeyv >> 8) == 2) {
3330 tp_features.has_adaptive_kbd = true;
3331 res = sysfs_create_group(&tpacpi_pdev->dev.kobj,
3332 &adaptive_kbd_attr_group);
3333 if (res)
3334 goto err_exit;
3335 }
3336 }
3337
3338 quirks = tpacpi_check_quirks(tpacpi_hotkey_qtable, 3337 quirks = tpacpi_check_quirks(tpacpi_hotkey_qtable,
3339 ARRAY_SIZE(tpacpi_hotkey_qtable)); 3338 ARRAY_SIZE(tpacpi_hotkey_qtable));
3340 3339
@@ -3357,30 +3356,70 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3357 A30, R30, R31, T20-22, X20-21, X22-24. Detected by checking 3356 A30, R30, R31, T20-22, X20-21, X22-24. Detected by checking
3358 for HKEY interface version 0x100 */ 3357 for HKEY interface version 0x100 */
3359 if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) { 3358 if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
3360 if ((hkeyv >> 8) != 1) { 3359 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
3361 pr_err("unknown version of the HKEY interface: 0x%x\n", 3360 "firmware HKEY interface version: 0x%x\n",
3362 hkeyv); 3361 hkeyv);
3363 pr_err("please report this to %s\n", TPACPI_MAIL); 3362
3364 } else { 3363 switch (hkeyv >> 8) {
3364 case 1:
3365 /* 3365 /*
3366 * MHKV 0x100 in A31, R40, R40e, 3366 * MHKV 0x100 in A31, R40, R40e,
3367 * T4x, X31, and later 3367 * T4x, X31, and later
3368 */ 3368 */
3369 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
3370 "firmware HKEY interface version: 0x%x\n",
3371 hkeyv);
3372 3369
3373 /* Paranoia check AND init hotkey_all_mask */ 3370 /* Paranoia check AND init hotkey_all_mask */
3374 if (!acpi_evalf(hkey_handle, &hotkey_all_mask, 3371 if (!acpi_evalf(hkey_handle, &hotkey_all_mask,
3375 "MHKA", "qd")) { 3372 "MHKA", "qd")) {
3376 pr_err("missing MHKA handler, " 3373 pr_err("missing MHKA handler, please report this to %s\n",
3377 "please report this to %s\n",
3378 TPACPI_MAIL); 3374 TPACPI_MAIL);
3379 /* Fallback: pre-init for FN+F3,F4,F12 */ 3375 /* Fallback: pre-init for FN+F3,F4,F12 */
3380 hotkey_all_mask = 0x080cU; 3376 hotkey_all_mask = 0x080cU;
3381 } else { 3377 } else {
3382 tp_features.hotkey_mask = 1; 3378 tp_features.hotkey_mask = 1;
3383 } 3379 }
3380 break;
3381
3382 case 2:
3383 /*
3384 * MHKV 0x200 in X1, T460s, X260, T560, X1 Tablet (2016)
3385 */
3386
3387 /* Paranoia check AND init hotkey_all_mask */
3388 if (!acpi_evalf(hkey_handle, &hotkey_all_mask,
3389 "MHKA", "dd", 1)) {
3390 pr_err("missing MHKA handler, please report this to %s\n",
3391 TPACPI_MAIL);
3392 /* Fallback: pre-init for FN+F3,F4,F12 */
3393 hotkey_all_mask = 0x080cU;
3394 } else {
3395 tp_features.hotkey_mask = 1;
3396 }
3397
3398 /*
3399 * Check if we have an adaptive keyboard, like on the
3400 * Lenovo Carbon X1 2014 (2nd Gen).
3401 */
3402 if (acpi_evalf(hkey_handle, &hotkey_adaptive_all_mask,
3403 "MHKA", "dd", 2)) {
3404 if (hotkey_adaptive_all_mask != 0) {
3405 tp_features.has_adaptive_kbd = true;
3406 res = sysfs_create_group(
3407 &tpacpi_pdev->dev.kobj,
3408 &adaptive_kbd_attr_group);
3409 if (res)
3410 goto err_exit;
3411 }
3412 } else {
3413 tp_features.has_adaptive_kbd = false;
3414 hotkey_adaptive_all_mask = 0x0U;
3415 }
3416 break;
3417
3418 default:
3419 pr_err("unknown version of the HKEY interface: 0x%x\n",
3420 hkeyv);
3421 pr_err("please report this to %s\n", TPACPI_MAIL);
3422 break;
3384 } 3423 }
3385 } 3424 }
3386 3425
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 579fd65299a0..d637c933c8a9 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -208,14 +208,10 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
208 break; 208 break;
209 209
210 case PTP_SYS_OFFSET: 210 case PTP_SYS_OFFSET:
211 sysoff = kmalloc(sizeof(*sysoff), GFP_KERNEL); 211 sysoff = memdup_user((void __user *)arg, sizeof(*sysoff));
212 if (!sysoff) { 212 if (IS_ERR(sysoff)) {
213 err = -ENOMEM; 213 err = PTR_ERR(sysoff);
214 break; 214 sysoff = NULL;
215 }
216 if (copy_from_user(sysoff, (void __user *)arg,
217 sizeof(*sysoff))) {
218 err = -EFAULT;
219 break; 215 break;
220 } 216 }
221 if (sysoff->n_samples > PTP_MAX_SAMPLES) { 217 if (sysoff->n_samples > PTP_MAX_SAMPLES) {
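
memdup_user() folds the kmalloc() + copy_from_user() pair into one call that returns an ERR_PTR() on failure; resetting sysoff to NULL matters because the ioctl's shared exit path kfree()s it. A distilled sketch, with a made-up request struct:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct my_req { u32 n_samples; };       /* hypothetical payload */

/* Sketch: copy a fixed-size request in from userspace. */
static int copy_req_in(void __user *uptr, struct my_req **out)
{
        struct my_req *req = memdup_user(uptr, sizeof(*req));

        if (IS_ERR(req)) {
                *out = NULL;            /* keep a later kfree(*out) harmless */
                return PTR_ERR(req);
        }

        *out = req;                     /* caller kfree()s when done */
        return 0;
}
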
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index dba3843c53b8..ed337a8c34ab 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -457,7 +457,8 @@ int pwm_apply_state(struct pwm_device *pwm, struct pwm_state *state)
457{ 457{
458 int err; 458 int err;
459 459
460 if (!pwm) 460 if (!pwm || !state || !state->period ||
461 state->duty_cycle > state->period)
461 return -EINVAL; 462 return -EINVAL;
462 463
463 if (!memcmp(state, &pwm->state, sizeof(*state))) 464 if (!memcmp(state, &pwm->state, sizeof(*state)))
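
With the added checks, pwm_apply_state() rejects a NULL state, a zero period, and duty_cycle > period before any driver callback runs, so .apply implementations can rely on the invariant. A hedged usage sketch:

#include <linux/pwm.h>

/* Sketch: program a 1 ms period at 50% duty, atomically. */
static int set_half_duty(struct pwm_device *pwm)
{
        struct pwm_state state;

        pwm_get_state(pwm, &state);
        state.period = 1000000;                 /* ns; must be non-zero */
        state.duty_cycle = state.period / 2;    /* may never exceed period */
        state.enabled = true;

        return pwm_apply_state(pwm, &state);    /* -EINVAL on a bad state */
}
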
diff --git a/drivers/pwm/pwm-atmel-hlcdc.c b/drivers/pwm/pwm-atmel-hlcdc.c
index f994c7eaf41c..14fc011faa32 100644
--- a/drivers/pwm/pwm-atmel-hlcdc.c
+++ b/drivers/pwm/pwm-atmel-hlcdc.c
@@ -272,7 +272,7 @@ static int atmel_hlcdc_pwm_probe(struct platform_device *pdev)
272 chip->chip.of_pwm_n_cells = 3; 272 chip->chip.of_pwm_n_cells = 3;
273 chip->chip.can_sleep = 1; 273 chip->chip.can_sleep = 1;
274 274
275 ret = pwmchip_add(&chip->chip); 275 ret = pwmchip_add_with_polarity(&chip->chip, PWM_POLARITY_INVERSED);
276 if (ret) { 276 if (ret) {
277 clk_disable_unprepare(hlcdc->periph_clk); 277 clk_disable_unprepare(hlcdc->periph_clk);
278 return ret; 278 return ret;
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
index d98599249a05..01695d48dd54 100644
--- a/drivers/pwm/sysfs.c
+++ b/drivers/pwm/sysfs.c
@@ -152,7 +152,7 @@ static ssize_t enable_store(struct device *child,
152 goto unlock; 152 goto unlock;
153 } 153 }
154 154
155 pwm_apply_state(pwm, &state); 155 ret = pwm_apply_state(pwm, &state);
156 156
157unlock: 157unlock:
158 mutex_unlock(&export->lock); 158 mutex_unlock(&export->lock);
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index 56a17ec5b5ef..526bf23dcb49 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -140,6 +140,19 @@ static const struct regulator_ops rpm_smps_ldo_ops = {
140 .enable = rpm_reg_enable, 140 .enable = rpm_reg_enable,
141 .disable = rpm_reg_disable, 141 .disable = rpm_reg_disable,
142 .is_enabled = rpm_reg_is_enabled, 142 .is_enabled = rpm_reg_is_enabled,
143 .list_voltage = regulator_list_voltage_linear_range,
144
145 .get_voltage = rpm_reg_get_voltage,
146 .set_voltage = rpm_reg_set_voltage,
147
148 .set_load = rpm_reg_set_load,
149};
150
151static const struct regulator_ops rpm_smps_ldo_ops_fixed = {
152 .enable = rpm_reg_enable,
153 .disable = rpm_reg_disable,
154 .is_enabled = rpm_reg_is_enabled,
155 .list_voltage = regulator_list_voltage_linear_range,
143 156
144 .get_voltage = rpm_reg_get_voltage, 157 .get_voltage = rpm_reg_get_voltage,
145 .set_voltage = rpm_reg_set_voltage, 158 .set_voltage = rpm_reg_set_voltage,
@@ -247,7 +260,7 @@ static const struct regulator_desc pm8941_nldo = {
247static const struct regulator_desc pm8941_lnldo = { 260static const struct regulator_desc pm8941_lnldo = {
248 .fixed_uV = 1740000, 261 .fixed_uV = 1740000,
249 .n_voltages = 1, 262 .n_voltages = 1,
250 .ops = &rpm_smps_ldo_ops, 263 .ops = &rpm_smps_ldo_ops_fixed,
251}; 264};
252 265
253static const struct regulator_desc pm8941_switch = { 266static const struct regulator_desc pm8941_switch = {
diff --git a/drivers/regulator/tps51632-regulator.c b/drivers/regulator/tps51632-regulator.c
index 572816e30095..c139890c1514 100644
--- a/drivers/regulator/tps51632-regulator.c
+++ b/drivers/regulator/tps51632-regulator.c
@@ -94,11 +94,14 @@ static int tps51632_dcdc_set_ramp_delay(struct regulator_dev *rdev,
94 int ramp_delay) 94 int ramp_delay)
95{ 95{
96 struct tps51632_chip *tps = rdev_get_drvdata(rdev); 96 struct tps51632_chip *tps = rdev_get_drvdata(rdev);
97 int bit = ramp_delay/6000; 97 int bit;
98 int ret; 98 int ret;
99 99
100 if (bit) 100 if (ramp_delay == 0)
101 bit--; 101 bit = 0;
102 else
103 bit = DIV_ROUND_UP(ramp_delay, 6000) - 1;
104
102 ret = regmap_write(tps->regmap, TPS51632_SLEW_REGS, BIT(bit)); 105 ret = regmap_write(tps->regmap, TPS51632_SLEW_REGS, BIT(bit));
103 if (ret < 0) 106 if (ret < 0)
104 dev_err(tps->dev, "SLEW reg write failed, err %d\n", ret); 107 dev_err(tps->dev, "SLEW reg write failed, err %d\n", ret);
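
The old "bit = ramp_delay / 6000; if (bit) bit--;" truncated toward zero, so a requested 7000 uV/us fell back to the 6000 uV/us step instead of stepping up; DIV_ROUND_UP() - 1 selects the ceiling. A worked sketch of the mapping, assuming 6000 uV/us per slew step:

#include <linux/kernel.h>

/* Sketch: slew steps come in multiples of 6000 uV/us, encoded as BIT(n). */
static int ramp_to_bit(int ramp_delay)
{
        if (ramp_delay == 0)
                return 0;

        /*
         *  6000 -> DIV_ROUND_UP( 6000, 6000) - 1 = 0
         *  7000 -> DIV_ROUND_UP( 7000, 6000) - 1 = 1  (ceiling, not floor)
         * 12000 -> DIV_ROUND_UP(12000, 6000) - 1 = 1
         */
        return DIV_ROUND_UP(ramp_delay, 6000) - 1;
}
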
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index d4c285688ce9..3ddc85e6efd6 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -1122,7 +1122,7 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
1122 } else { 1122 } else {
1123 struct scsi_cmnd *SCp; 1123 struct scsi_cmnd *SCp;
1124 1124
1125 SCp = scsi_host_find_tag(SDp->host, SCSI_NO_TAG); 1125 SCp = SDp->current_cmnd;
1126 if(unlikely(SCp == NULL)) { 1126 if(unlikely(SCp == NULL)) {
1127 sdev_printk(KERN_ERR, SDp, 1127 sdev_printk(KERN_ERR, SDp,
1128 "no saved request for untagged cmd\n"); 1128 "no saved request for untagged cmd\n");
@@ -1826,7 +1826,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
1826 slot->tag, slot); 1826 slot->tag, slot);
1827 } else { 1827 } else {
1828 slot->tag = SCSI_NO_TAG; 1828 slot->tag = SCSI_NO_TAG;
1829 /* must populate current_cmnd for scsi_host_find_tag to work */ 1829 /* save current command for reselection */
1830 SCp->device->current_cmnd = SCp; 1830 SCp->device->current_cmnd = SCp;
1831 } 1831 }
1832 /* sanity check: some of the commands generated by the mid-layer 1832 /* sanity check: some of the commands generated by the mid-layer
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 8f90d9e77104..969c312de1be 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -621,6 +621,11 @@ struct aac_driver_ident
621#define AAC_QUIRK_SCSI_32 0x0020 621#define AAC_QUIRK_SCSI_32 0x0020
622 622
623/* 623/*
624 * SRC based adapters support the AifReqEvent functions
625 */
626#define AAC_QUIRK_SRC 0x0040
627
628/*
624 * The adapter interface specs all queues to be located in the same 629 * The adapter interface specs all queues to be located in the same
625 * physically contiguous block. The host structure that defines the 630 * physically contiguous block. The host structure that defines the
 626 * communication queues will assume they are each a separate physically 631 * communication queues will assume they are each a separate physically
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index a943bd230bc2..79871f3519ff 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -236,10 +236,10 @@ static struct aac_driver_ident aac_drivers[] = {
236 { aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */ 236 { aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */
237 { aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */ 237 { aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */
238 { aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec NEMER/ARK Catch All */ 238 { aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec NEMER/ARK Catch All */
239 { aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 6 (Tupelo) */ 239 { aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 6 (Tupelo) */
240 { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 7 (Denali) */ 240 { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 7 (Denali) */
241 { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 8 */ 241 { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 8 */
242 { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec PMC Series 9 */ 242 { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC } /* Adaptec PMC Series 9 */
243}; 243};
244 244
245/** 245/**
@@ -1299,7 +1299,8 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1299 else 1299 else
1300 shost->this_id = shost->max_id; 1300 shost->this_id = shost->max_id;
1301 1301
1302 aac_intr_normal(aac, 0, 2, 0, NULL); 1302 if (aac_drivers[index].quirks & AAC_QUIRK_SRC)
1303 aac_intr_normal(aac, 0, 2, 0, NULL);
1303 1304
1304 /* 1305 /*
1305 * dmb - we may need to move the setting of these parms somewhere else once 1306 * dmb - we may need to move the setting of these parms somewhere else once
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 6a4df5a315e9..6bff13e7afc7 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -7975,13 +7975,14 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
7975 ActiveCableEventData = 7975 ActiveCableEventData =
7976 (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData; 7976 (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
7977 if (ActiveCableEventData->ReasonCode == 7977 if (ActiveCableEventData->ReasonCode ==
7978 MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER) 7978 MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER) {
7979 pr_info(MPT3SAS_FMT "Currently an active cable with ReceptacleID %d", 7979 pr_info(MPT3SAS_FMT "Currently an active cable with ReceptacleID %d",
7980 ioc->name, ActiveCableEventData->ReceptacleID); 7980 ioc->name, ActiveCableEventData->ReceptacleID);
7981 pr_info("cannot be powered and devices connected to this active cable"); 7981 pr_info("cannot be powered and devices connected to this active cable");
7982 pr_info("will not be seen. This active cable"); 7982 pr_info("will not be seen. This active cable");
7983 pr_info("requires %d mW of power", 7983 pr_info("requires %d mW of power",
7984 ActiveCableEventData->ActiveCablePowerRequirement); 7984 ActiveCableEventData->ActiveCablePowerRequirement);
7985 }
7985 break; 7986 break;
7986 7987
7987 default: /* ignore the rest */ 7988 default: /* ignore the rest */
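
Without braces only the first pr_info() was guarded; the other three lines of the message printed for every cable event, whatever the reason code. The classic pitfall, distilled into a standalone C sketch:

#include <stdio.h>

/* Sketch: in C, an unbraced if guards exactly one statement. */
static void report(int reason)
{
        if (reason == 1)
                printf("insufficient power, ");
        printf("cable will not be seen\n");     /* BUG: always runs */

        if (reason == 1) {                      /* fixed: braces scope both */
                printf("insufficient power, ");
                printf("cable will not be seen\n");
        }
}

int main(void)
{
        report(0);      /* the unbraced variant still prints its second line */
        return 0;
}
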
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 3408578b08d6..ff41c310c900 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -230,6 +230,7 @@ static struct {
230 {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, 230 {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
231 {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC}, 231 {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC},
232 {"Promise", "", NULL, BLIST_SPARSELUN}, 232 {"Promise", "", NULL, BLIST_SPARSELUN},
233 {"QEMU", "QEMU CD-ROM", NULL, BLIST_SKIP_VPD_PAGES},
233 {"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024}, 234 {"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024},
234 {"SYNOLOGY", "iSCSI Storage", NULL, BLIST_MAX_1024}, 235 {"SYNOLOGY", "iSCSI Storage", NULL, BLIST_MAX_1024},
235 {"QUANTUM", "XP34301", "1071", BLIST_NOTQ}, 236 {"QUANTUM", "XP34301", "1071", BLIST_NOTQ},
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index a8b610eaa0ca..106a6adbd6f1 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1128,7 +1128,6 @@ static int scsi_eh_action(struct scsi_cmnd *scmd, int rtn)
1128 */ 1128 */
1129void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q) 1129void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
1130{ 1130{
1131 scmd->device->host->host_failed--;
1132 scmd->eh_eflags = 0; 1131 scmd->eh_eflags = 0;
1133 list_move_tail(&scmd->eh_entry, done_q); 1132 list_move_tail(&scmd->eh_entry, done_q);
1134} 1133}
@@ -2227,6 +2226,9 @@ int scsi_error_handler(void *data)
2227 else 2226 else
2228 scsi_unjam_host(shost); 2227 scsi_unjam_host(shost);
2229 2228
2229 /* All scmds have been handled */
2230 shost->host_failed = 0;
2231
2230 /* 2232 /*
2231 * Note - if the above fails completely, the action is to take 2233 * Note - if the above fails completely, the action is to take
2232 * individual devices offline and flush the queue of any 2234 * individual devices offline and flush the queue of any
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index b2e332af0f51..c71344aebdbb 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -821,9 +821,12 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
821 } 821 }
822 822
823 /* 823 /*
824 * If we finished all bytes in the request we are done now. 824 * special case: failed zero length commands always need to
825 * drop down into the retry code. Otherwise, if we finished
826 * all bytes in the request we are done now.
825 */ 827 */
826 if (!scsi_end_request(req, error, good_bytes, 0)) 828 if (!(blk_rq_bytes(req) == 0 && error) &&
829 !scsi_end_request(req, error, good_bytes, 0))
827 return; 830 return;
828 831
829 /* 832 /*
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 428c03ef02b2..60bff78e9ead 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1398,11 +1398,15 @@ static int media_not_present(struct scsi_disk *sdkp,
1398 **/ 1398 **/
1399static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing) 1399static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
1400{ 1400{
1401 struct scsi_disk *sdkp = scsi_disk(disk); 1401 struct scsi_disk *sdkp = scsi_disk_get(disk);
1402 struct scsi_device *sdp = sdkp->device; 1402 struct scsi_device *sdp;
1403 struct scsi_sense_hdr *sshdr = NULL; 1403 struct scsi_sense_hdr *sshdr = NULL;
1404 int retval; 1404 int retval;
1405 1405
1406 if (!sdkp)
1407 return 0;
1408
1409 sdp = sdkp->device;
1406 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n")); 1410 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));
1407 1411
1408 /* 1412 /*
@@ -1459,6 +1463,7 @@ out:
1459 kfree(sshdr); 1463 kfree(sshdr);
1460 retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0; 1464 retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
1461 sdp->changed = 0; 1465 sdp->changed = 0;
1466 scsi_disk_put(sdkp);
1462 return retval; 1467 return retval;
1463} 1468}
1464 1469
@@ -2862,10 +2867,10 @@ static int sd_revalidate_disk(struct gendisk *disk)
2862 if (sdkp->opt_xfer_blocks && 2867 if (sdkp->opt_xfer_blocks &&
2863 sdkp->opt_xfer_blocks <= dev_max && 2868 sdkp->opt_xfer_blocks <= dev_max &&
2864 sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS && 2869 sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
2865 sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_SIZE) 2870 logical_to_bytes(sdp, sdkp->opt_xfer_blocks) >= PAGE_SIZE) {
2866 rw_max = q->limits.io_opt = 2871 q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
2867 sdkp->opt_xfer_blocks * sdp->sector_size; 2872 rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
2868 else 2873 } else
2869 rw_max = BLK_DEF_MAX_SECTORS; 2874 rw_max = BLK_DEF_MAX_SECTORS;
2870 2875
2871 /* Combine with controller limits */ 2876 /* Combine with controller limits */
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 654630bb7d0e..765a6f1ac1b7 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -151,6 +151,11 @@ static inline sector_t logical_to_sectors(struct scsi_device *sdev, sector_t blo
151 return blocks << (ilog2(sdev->sector_size) - 9); 151 return blocks << (ilog2(sdev->sector_size) - 9);
152} 152}
153 153
154static inline unsigned int logical_to_bytes(struct scsi_device *sdev, sector_t blocks)
155{
156 return blocks * sdev->sector_size;
157}
158
154/* 159/*
155 * A DIF-capable target device can be formatted with different 160 * A DIF-capable target device can be formatted with different
156 * protection schemes. Currently 0 through 3 are defined: 161 * protection schemes. Currently 0 through 3 are defined:
diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h
index b56885c14839..ebb34dca60df 100644
--- a/drivers/staging/android/sync.h
+++ b/drivers/staging/android/sync.h
@@ -68,7 +68,8 @@ struct sync_timeline {
68 68
69 /* protected by child_list_lock */ 69 /* protected by child_list_lock */
70 bool destroyed; 70 bool destroyed;
71 int context, value; 71 u64 context;
72 int value;
72 73
73 struct list_head child_list_head; 74 struct list_head child_list_head;
74 spinlock_t child_list_lock; 75 spinlock_t child_list_lock;
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index bbfee53cfcf5..845e49a52430 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -2521,12 +2521,13 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
2521 return 0; 2521 return 0;
2522 2522
2523 failed: 2523 failed:
2524 if (ni) 2524 if (ni) {
2525 lnet_ni_decref(ni); 2525 lnet_ni_decref(ni);
2526 rej.ibr_cp.ibcp_queue_depth = kiblnd_msg_queue_size(version, ni);
2527 rej.ibr_cp.ibcp_max_frags = kiblnd_rdma_frags(version, ni);
2528 }
2526 2529
2527 rej.ibr_version = version; 2530 rej.ibr_version = version;
2528 rej.ibr_cp.ibcp_queue_depth = kiblnd_msg_queue_size(version, ni);
2529 rej.ibr_cp.ibcp_max_frags = kiblnd_rdma_frags(version, ni);
2530 kiblnd_reject(cmid, &rej); 2531 kiblnd_reject(cmid, &rej);
2531 2532
2532 return -ECONNREFUSED; 2533 return -ECONNREFUSED;
diff --git a/drivers/staging/rtl8188eu/core/rtw_efuse.c b/drivers/staging/rtl8188eu/core/rtw_efuse.c
index c17870cddb5b..fbce1f7e68ca 100644
--- a/drivers/staging/rtl8188eu/core/rtw_efuse.c
+++ b/drivers/staging/rtl8188eu/core/rtw_efuse.c
@@ -102,7 +102,7 @@ efuse_phymap_to_logical(u8 *phymap, u16 _offset, u16 _size_byte, u8 *pbuf)
102 if (!efuseTbl) 102 if (!efuseTbl)
103 return; 103 return;
104 104
105 eFuseWord = (u16 **)rtw_malloc2d(EFUSE_MAX_SECTION_88E, EFUSE_MAX_WORD_UNIT, sizeof(*eFuseWord)); 105 eFuseWord = (u16 **)rtw_malloc2d(EFUSE_MAX_SECTION_88E, EFUSE_MAX_WORD_UNIT, sizeof(u16));
106 if (!eFuseWord) { 106 if (!eFuseWord) {
107 DBG_88E("%s: alloc eFuseWord fail!\n", __func__); 107 DBG_88E("%s: alloc eFuseWord fail!\n", __func__);
108 goto eFuseWord_failed; 108 goto eFuseWord_failed;
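
eFuseWord is a u16 **, so sizeof(*eFuseWord) is the size of a row pointer, not of the u16 elements the 2-D allocator lays out; the familiar p = kmalloc(sizeof(*p)) idiom is one level of indirection short here. A tiny standalone check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t **grid = NULL;         /* sizeof() does not dereference */

        printf("sizeof(*grid)  = %zu  (a row pointer)\n", sizeof(*grid));
        printf("sizeof(**grid) = %zu  (the real element)\n", sizeof(**grid));
        return 0;                       /* on LP64: 8 vs 2 */
}
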
diff --git a/drivers/staging/rtl8188eu/hal/usb_halinit.c b/drivers/staging/rtl8188eu/hal/usb_halinit.c
index 87ea3b844951..363f3a34ddce 100644
--- a/drivers/staging/rtl8188eu/hal/usb_halinit.c
+++ b/drivers/staging/rtl8188eu/hal/usb_halinit.c
@@ -2072,7 +2072,8 @@ void rtl8188eu_set_hal_ops(struct adapter *adapt)
2072{ 2072{
2073 struct hal_ops *halfunc = &adapt->HalFunc; 2073 struct hal_ops *halfunc = &adapt->HalFunc;
2074 2074
2075 adapt->HalData = kzalloc(sizeof(*adapt->HalData), GFP_KERNEL); 2075
2076 adapt->HalData = kzalloc(sizeof(struct hal_data_8188e), GFP_KERNEL);
2076 if (!adapt->HalData) 2077 if (!adapt->HalData)
2077 DBG_88E("cant not alloc memory for HAL DATA\n"); 2078 DBG_88E("cant not alloc memory for HAL DATA\n");
2078 2079
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 6ceac4f2d4b2..5b4b47ed948b 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -857,14 +857,6 @@ __cpufreq_cooling_register(struct device_node *np,
857 goto free_power_table; 857 goto free_power_table;
858 } 858 }
859 859
860 snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
861 cpufreq_dev->id);
862
863 cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
864 &cpufreq_cooling_ops);
865 if (IS_ERR(cool_dev))
866 goto remove_idr;
867
868 /* Fill freq-table in descending order of frequencies */ 860 /* Fill freq-table in descending order of frequencies */
869 for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) { 861 for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) {
870 freq = find_next_max(table, freq); 862 freq = find_next_max(table, freq);
@@ -877,6 +869,14 @@ __cpufreq_cooling_register(struct device_node *np,
877 pr_debug("%s: freq:%u KHz\n", __func__, freq); 869 pr_debug("%s: freq:%u KHz\n", __func__, freq);
878 } 870 }
879 871
872 snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
873 cpufreq_dev->id);
874
875 cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
876 &cpufreq_cooling_ops);
877 if (IS_ERR(cool_dev))
878 goto remove_idr;
879
880 cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0]; 880 cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0];
881 cpufreq_dev->cool_dev = cool_dev; 881 cpufreq_dev->cool_dev = cool_dev;
882 882
diff --git a/drivers/thermal/int340x_thermal/int3406_thermal.c b/drivers/thermal/int340x_thermal/int3406_thermal.c
index 13d431cbd29e..a578cd257db4 100644
--- a/drivers/thermal/int340x_thermal/int3406_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3406_thermal.c
@@ -177,7 +177,7 @@ static int int3406_thermal_probe(struct platform_device *pdev)
177 return -ENODEV; 177 return -ENODEV;
178 d->raw_bd = bd; 178 d->raw_bd = bd;
179 179
180 ret = acpi_video_get_levels(ACPI_COMPANION(&pdev->dev), &d->br); 180 ret = acpi_video_get_levels(ACPI_COMPANION(&pdev->dev), &d->br, NULL);
181 if (ret) 181 if (ret)
182 return ret; 182 return ret;
183 183
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index 82c4d2e45319..95103054c0e4 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -120,17 +120,6 @@ config UNIX98_PTYS
120 All modern Linux systems use the Unix98 ptys. Say Y unless 120 All modern Linux systems use the Unix98 ptys. Say Y unless
121 you're on an embedded system and want to conserve memory. 121 you're on an embedded system and want to conserve memory.
122 122
123config DEVPTS_MULTIPLE_INSTANCES
124 bool "Support multiple instances of devpts"
125 depends on UNIX98_PTYS
126 default n
127 ---help---
128 Enable support for multiple instances of devpts filesystem.
129 If you want to have isolated PTY namespaces (eg: in containers),
130 say Y here. Otherwise, say N. If enabled, each mount of devpts
131 filesystem with the '-o newinstance' option will create an
132 independent PTY namespace.
133
134config LEGACY_PTYS 123config LEGACY_PTYS
135 bool "Legacy (BSD) PTY support" 124 bool "Legacy (BSD) PTY support"
136 default y 125 default y
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index dd4b8417e7f4..f856c4544eea 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -668,7 +668,7 @@ static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
668 else 668 else
669 fsi = tty->link->driver_data; 669 fsi = tty->link->driver_data;
670 devpts_kill_index(fsi, tty->index); 670 devpts_kill_index(fsi, tty->index);
671 devpts_put_ref(fsi); 671 devpts_release(fsi);
672} 672}
673 673
674static const struct tty_operations ptm_unix98_ops = { 674static const struct tty_operations ptm_unix98_ops = {
@@ -733,10 +733,11 @@ static int ptmx_open(struct inode *inode, struct file *filp)
733 if (retval) 733 if (retval)
734 return retval; 734 return retval;
735 735
736 fsi = devpts_get_ref(inode, filp); 736 fsi = devpts_acquire(filp);
737 retval = -ENODEV; 737 if (IS_ERR(fsi)) {
738 if (!fsi) 738 retval = PTR_ERR(fsi);
739 goto out_free_file; 739 goto out_free_file;
740 }
740 741
741 /* find a device that is not in use. */ 742 /* find a device that is not in use. */
742 mutex_lock(&devpts_mutex); 743 mutex_lock(&devpts_mutex);
@@ -745,7 +746,7 @@ static int ptmx_open(struct inode *inode, struct file *filp)
745 746
746 retval = index; 747 retval = index;
747 if (index < 0) 748 if (index < 0)
748 goto out_put_ref; 749 goto out_put_fsi;
749 750
750 751
751 mutex_lock(&tty_mutex); 752 mutex_lock(&tty_mutex);
@@ -789,8 +790,8 @@ err_release:
789 return retval; 790 return retval;
790out: 791out:
791 devpts_kill_index(fsi, index); 792 devpts_kill_index(fsi, index);
792out_put_ref: 793out_put_fsi:
793 devpts_put_ref(fsi); 794 devpts_release(fsi);
794out_free_file: 795out_free_file:
795 tty_free_file(filp); 796 tty_free_file(filp);
796 return retval; 797 return retval;
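
devpts_get_ref() returned NULL on failure, which forced ptmx_open() to guess a blanket -ENODEV; devpts_acquire() returns ERR_PTR(-errno), so the real reason propagates. A sketch of the ERR_PTR convention with made-up names:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_ctx { int id; };      /* hypothetical handle */

/* Sketch: return a handle or an encoded errno, never NULL-on-error. */
static struct my_ctx *ctx_acquire(bool ready)
{
        struct my_ctx *ctx;

        if (!ready)
                return ERR_PTR(-ENODEV);        /* caller learns the reason */

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return ERR_PTR(-ENOMEM);

        return ctx;
}

A caller then tests with IS_ERR(ctx) and forwards PTR_ERR(ctx), exactly as the reworked ptmx_open() does above.
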
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 6dc810bce295..944a6dca0fcb 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -44,6 +44,9 @@ static const struct usb_device_id usb_quirk_list[] = {
44 /* Creative SB Audigy 2 NX */ 44 /* Creative SB Audigy 2 NX */
45 { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME }, 45 { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
46 46
47 /* USB3503 */
48 { USB_DEVICE(0x0424, 0x3503), .driver_info = USB_QUIRK_RESET_RESUME },
49
47 /* Microsoft Wireless Laser Mouse 6000 Receiver */ 50 /* Microsoft Wireless Laser Mouse 6000 Receiver */
48 { USB_DEVICE(0x045e, 0x00e1), .driver_info = USB_QUIRK_RESET_RESUME }, 51 { USB_DEVICE(0x045e, 0x00e1), .driver_info = USB_QUIRK_RESET_RESUME },
49 52
@@ -173,6 +176,10 @@ static const struct usb_device_id usb_quirk_list[] = {
173 /* MAYA44USB sound device */ 176 /* MAYA44USB sound device */
174 { USB_DEVICE(0x0a92, 0x0091), .driver_info = USB_QUIRK_RESET_RESUME }, 177 { USB_DEVICE(0x0a92, 0x0091), .driver_info = USB_QUIRK_RESET_RESUME },
175 178
179 /* ASUS Base Station(T100) */
180 { USB_DEVICE(0x0b05, 0x17e0), .driver_info =
181 USB_QUIRK_IGNORE_REMOTE_WAKEUP },
182
176 /* Action Semiconductor flash disk */ 183 /* Action Semiconductor flash disk */
177 { USB_DEVICE(0x10d6, 0x2200), .driver_info = 184 { USB_DEVICE(0x10d6, 0x2200), .driver_info =
178 USB_QUIRK_STRING_FETCH_255 }, 185 USB_QUIRK_STRING_FETCH_255 },
@@ -188,26 +195,22 @@ static const struct usb_device_id usb_quirk_list[] = {
188 { USB_DEVICE(0x1908, 0x1315), .driver_info = 195 { USB_DEVICE(0x1908, 0x1315), .driver_info =
189 USB_QUIRK_HONOR_BNUMINTERFACES }, 196 USB_QUIRK_HONOR_BNUMINTERFACES },
190 197
191 /* INTEL VALUE SSD */
192 { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
193
194 /* USB3503 */
195 { USB_DEVICE(0x0424, 0x3503), .driver_info = USB_QUIRK_RESET_RESUME },
196
197 /* ASUS Base Station(T100) */
198 { USB_DEVICE(0x0b05, 0x17e0), .driver_info =
199 USB_QUIRK_IGNORE_REMOTE_WAKEUP },
200
201 /* Protocol and OTG Electrical Test Device */ 198 /* Protocol and OTG Electrical Test Device */
202 { USB_DEVICE(0x1a0a, 0x0200), .driver_info = 199 { USB_DEVICE(0x1a0a, 0x0200), .driver_info =
203 USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, 200 USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
204 201
202 /* Acer C120 LED Projector */
203 { USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM },
204
205 /* Blackmagic Design Intensity Shuttle */ 205 /* Blackmagic Design Intensity Shuttle */
206 { USB_DEVICE(0x1edb, 0xbd3b), .driver_info = USB_QUIRK_NO_LPM }, 206 { USB_DEVICE(0x1edb, 0xbd3b), .driver_info = USB_QUIRK_NO_LPM },
207 207
208 /* Blackmagic Design UltraStudio SDI */ 208 /* Blackmagic Design UltraStudio SDI */
209 { USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM }, 209 { USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM },
210 210
211 /* INTEL VALUE SSD */
212 { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
213
211 { } /* terminating entry must be last */ 214 { } /* terminating entry must be last */
212}; 215};
213 216
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 3c58d633ce80..dec0b21fc626 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -64,6 +64,17 @@
64 DWC2_TRACE_SCHEDULER_VB(pr_fmt("%s: SCH: " fmt), \ 64 DWC2_TRACE_SCHEDULER_VB(pr_fmt("%s: SCH: " fmt), \
65 dev_name(hsotg->dev), ##__VA_ARGS__) 65 dev_name(hsotg->dev), ##__VA_ARGS__)
66 66
67#ifdef CONFIG_MIPS
68/*
69 * There are some MIPS machines that can run in either big-endian
70 * or little-endian mode and that use the dwc2 register without
71 * a byteswap in both ways.
72 * Unlike other architectures, MIPS apparently does not require a
73 * barrier before the __raw_writel() to synchronize with DMA but does
74 * require the barrier after the __raw_writel() to serialize a set of
75 * writes. This set of operations was added specifically for MIPS and
76 * should only be used there.
77 */
67static inline u32 dwc2_readl(const void __iomem *addr) 78static inline u32 dwc2_readl(const void __iomem *addr)
68{ 79{
69 u32 value = __raw_readl(addr); 80 u32 value = __raw_readl(addr);
@@ -90,6 +101,22 @@ static inline void dwc2_writel(u32 value, void __iomem *addr)
90 pr_info("INFO:: wrote %08x to %p\n", value, addr); 101 pr_info("INFO:: wrote %08x to %p\n", value, addr);
91#endif 102#endif
92} 103}
104#else
 105/* Normal architectures just use readl/writel */
106static inline u32 dwc2_readl(const void __iomem *addr)
107{
108 return readl(addr);
109}
110
111static inline void dwc2_writel(u32 value, void __iomem *addr)
112{
113 writel(value, addr);
114
115#ifdef DWC2_LOG_WRITES
116 pr_info("info:: wrote %08x to %p\n", value, addr);
117#endif
118}
119#endif
93 120
94/* Maximum number of Endpoints/HostChannels */ 121/* Maximum number of Endpoints/HostChannels */
95#define MAX_EPS_CHANNELS 16 122#define MAX_EPS_CHANNELS 16
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 4c5e3005e1dc..26cf09d0fe3c 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -1018,7 +1018,7 @@ static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
1018 return 1; 1018 return 1;
1019} 1019}
1020 1020
1021static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value); 1021static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now);
1022 1022
1023/** 1023/**
1024 * get_ep_head - return the first request on the endpoint 1024 * get_ep_head - return the first request on the endpoint
@@ -1094,7 +1094,7 @@ static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
1094 case USB_ENDPOINT_HALT: 1094 case USB_ENDPOINT_HALT:
1095 halted = ep->halted; 1095 halted = ep->halted;
1096 1096
1097 dwc2_hsotg_ep_sethalt(&ep->ep, set); 1097 dwc2_hsotg_ep_sethalt(&ep->ep, set, true);
1098 1098
1099 ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0); 1099 ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
1100 if (ret) { 1100 if (ret) {
@@ -2948,8 +2948,13 @@ static int dwc2_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
2948 * dwc2_hsotg_ep_sethalt - set halt on a given endpoint 2948 * dwc2_hsotg_ep_sethalt - set halt on a given endpoint
2949 * @ep: The endpoint to set halt. 2949 * @ep: The endpoint to set halt.
2950 * @value: Set or unset the halt. 2950 * @value: Set or unset the halt.
2951 * @now: If true, stall the endpoint now. Otherwise return -EAGAIN if
2952 * the endpoint is busy processing requests.
2953 *
2954 * We need to stall the endpoint immediately if request comes from set_feature
2955 * protocol command handler.
2951 */ 2956 */
2952static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value) 2957static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now)
2953{ 2958{
2954 struct dwc2_hsotg_ep *hs_ep = our_ep(ep); 2959 struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
2955 struct dwc2_hsotg *hs = hs_ep->parent; 2960 struct dwc2_hsotg *hs = hs_ep->parent;
@@ -2969,6 +2974,17 @@ static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value)
2969 return 0; 2974 return 0;
2970 } 2975 }
2971 2976
2977 if (hs_ep->isochronous) {
2978 dev_err(hs->dev, "%s is Isochronous Endpoint\n", ep->name);
2979 return -EINVAL;
2980 }
2981
2982 if (!now && value && !list_empty(&hs_ep->queue)) {
2983 dev_dbg(hs->dev, "%s request is pending, cannot halt\n",
2984 ep->name);
2985 return -EAGAIN;
2986 }
2987
2972 if (hs_ep->dir_in) { 2988 if (hs_ep->dir_in) {
2973 epreg = DIEPCTL(index); 2989 epreg = DIEPCTL(index);
2974 epctl = dwc2_readl(hs->regs + epreg); 2990 epctl = dwc2_readl(hs->regs + epreg);
@@ -3020,7 +3036,7 @@ static int dwc2_hsotg_ep_sethalt_lock(struct usb_ep *ep, int value)
3020 int ret = 0; 3036 int ret = 0;
3021 3037
3022 spin_lock_irqsave(&hs->lock, flags); 3038 spin_lock_irqsave(&hs->lock, flags);
3023 ret = dwc2_hsotg_ep_sethalt(ep, value); 3039 ret = dwc2_hsotg_ep_sethalt(ep, value, false);
3024 spin_unlock_irqrestore(&hs->lock, flags); 3040 spin_unlock_irqrestore(&hs->lock, flags);
3025 3041
3026 return ret; 3042 return ret;
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 7ddf9449a063..654050684f4f 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -402,6 +402,7 @@
402#define DWC3_DEPCMD_GET_RSC_IDX(x) (((x) >> DWC3_DEPCMD_PARAM_SHIFT) & 0x7f) 402#define DWC3_DEPCMD_GET_RSC_IDX(x) (((x) >> DWC3_DEPCMD_PARAM_SHIFT) & 0x7f)
403#define DWC3_DEPCMD_STATUS(x) (((x) >> 12) & 0x0F) 403#define DWC3_DEPCMD_STATUS(x) (((x) >> 12) & 0x0F)
404#define DWC3_DEPCMD_HIPRI_FORCERM (1 << 11) 404#define DWC3_DEPCMD_HIPRI_FORCERM (1 << 11)
405#define DWC3_DEPCMD_CLEARPENDIN (1 << 11)
405#define DWC3_DEPCMD_CMDACT (1 << 10) 406#define DWC3_DEPCMD_CMDACT (1 << 10)
406#define DWC3_DEPCMD_CMDIOC (1 << 8) 407#define DWC3_DEPCMD_CMDIOC (1 << 8)
407 408
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index dd5cb5577dca..2f1fb7e7aa54 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -128,12 +128,6 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
128 128
129 platform_set_drvdata(pdev, exynos); 129 platform_set_drvdata(pdev, exynos);
130 130
131 ret = dwc3_exynos_register_phys(exynos);
132 if (ret) {
133 dev_err(dev, "couldn't register PHYs\n");
134 return ret;
135 }
136
137 exynos->dev = dev; 131 exynos->dev = dev;
138 132
139 exynos->clk = devm_clk_get(dev, "usbdrd30"); 133 exynos->clk = devm_clk_get(dev, "usbdrd30");
@@ -183,20 +177,29 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
183 goto err3; 177 goto err3;
184 } 178 }
185 179
180 ret = dwc3_exynos_register_phys(exynos);
181 if (ret) {
182 dev_err(dev, "couldn't register PHYs\n");
183 goto err4;
184 }
185
186 if (node) { 186 if (node) {
187 ret = of_platform_populate(node, NULL, NULL, dev); 187 ret = of_platform_populate(node, NULL, NULL, dev);
188 if (ret) { 188 if (ret) {
189 dev_err(dev, "failed to add dwc3 core\n"); 189 dev_err(dev, "failed to add dwc3 core\n");
190 goto err4; 190 goto err5;
191 } 191 }
192 } else { 192 } else {
193 dev_err(dev, "no device node, failed to add dwc3 core\n"); 193 dev_err(dev, "no device node, failed to add dwc3 core\n");
194 ret = -ENODEV; 194 ret = -ENODEV;
195 goto err4; 195 goto err5;
196 } 196 }
197 197
198 return 0; 198 return 0;
199 199
200err5:
201 platform_device_unregister(exynos->usb2_phy);
202 platform_device_unregister(exynos->usb3_phy);
200err4: 203err4:
201 regulator_disable(exynos->vdd10); 204 regulator_disable(exynos->vdd10);
202err3: 205err3:
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
index 5c0adb9c6fb2..50d6ae6f88bc 100644
--- a/drivers/usb/dwc3/dwc3-st.c
+++ b/drivers/usb/dwc3/dwc3-st.c
@@ -129,12 +129,18 @@ static int st_dwc3_drd_init(struct st_dwc3 *dwc3_data)
129 switch (dwc3_data->dr_mode) { 129 switch (dwc3_data->dr_mode) {
130 case USB_DR_MODE_PERIPHERAL: 130 case USB_DR_MODE_PERIPHERAL:
131 131
132 val &= ~(USB3_FORCE_VBUSVALID | USB3_DELAY_VBUSVALID 132 val &= ~(USB3_DELAY_VBUSVALID
133 | USB3_SEL_FORCE_OPMODE | USB3_FORCE_OPMODE(0x3) 133 | USB3_SEL_FORCE_OPMODE | USB3_FORCE_OPMODE(0x3)
134 | USB3_SEL_FORCE_DPPULLDOWN2 | USB3_FORCE_DPPULLDOWN2 134 | USB3_SEL_FORCE_DPPULLDOWN2 | USB3_FORCE_DPPULLDOWN2
135 | USB3_SEL_FORCE_DMPULLDOWN2 | USB3_FORCE_DMPULLDOWN2); 135 | USB3_SEL_FORCE_DMPULLDOWN2 | USB3_FORCE_DMPULLDOWN2);
136 136
137 val |= USB3_DEVICE_NOT_HOST; 137 /*
138 * When USB3_PORT2_FORCE_VBUSVALID is '1' and
139 * USB3_PORT2_DEVICE_NOT_HOST is '1', the VBUSVLDEXT2 input
140 * of the pico PHY is forced to 1.
141 */
142
143 val |= USB3_DEVICE_NOT_HOST | USB3_FORCE_VBUSVALID;
138 break; 144 break;
139 145
140 case USB_DR_MODE_HOST: 146 case USB_DR_MODE_HOST:
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 9a7d0bd15dc3..07248ff1be5c 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -347,6 +347,28 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
347 return ret; 347 return ret;
348} 348}
349 349
350static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
351{
352 struct dwc3 *dwc = dep->dwc;
353 struct dwc3_gadget_ep_cmd_params params;
354 u32 cmd = DWC3_DEPCMD_CLEARSTALL;
355
356 /*
357 * As of core revision 2.60a the recommended programming model
358 * is to set the ClearPendIN bit when issuing a Clear Stall EP
359 * command for IN endpoints. This is to prevent an issue where
360 * some (non-compliant) hosts may not send ACK TPs for pending
361 * IN transfers due to a mishandled error condition. Synopsys
362 * STAR 9000614252.
363 */
364 if (dep->direction && (dwc->revision >= DWC3_REVISION_260A))
365 cmd |= DWC3_DEPCMD_CLEARPENDIN;
366
367 memset(&params, 0, sizeof(params));
368
369 return dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
370}
371
350static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep, 372static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
351 struct dwc3_trb *trb) 373 struct dwc3_trb *trb)
352{ 374{
@@ -1314,8 +1336,7 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
1314 else 1336 else
1315 dep->flags |= DWC3_EP_STALL; 1337 dep->flags |= DWC3_EP_STALL;
1316 } else { 1338 } else {
1317 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, 1339 ret = dwc3_send_clear_stall_ep_cmd(dep);
1318 DWC3_DEPCMD_CLEARSTALL, &params);
1319 if (ret) 1340 if (ret)
1320 dev_err(dwc->dev, "failed to clear STALL on %s\n", 1341 dev_err(dwc->dev, "failed to clear STALL on %s\n",
1321 dep->name); 1342 dep->name);
@@ -2247,7 +2268,6 @@ static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
2247 2268
2248 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 2269 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2249 struct dwc3_ep *dep; 2270 struct dwc3_ep *dep;
2250 struct dwc3_gadget_ep_cmd_params params;
2251 int ret; 2271 int ret;
2252 2272
2253 dep = dwc->eps[epnum]; 2273 dep = dwc->eps[epnum];
@@ -2259,9 +2279,7 @@ static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
2259 2279
2260 dep->flags &= ~DWC3_EP_STALL; 2280 dep->flags &= ~DWC3_EP_STALL;
2261 2281
2262 memset(&params, 0, sizeof(params)); 2282 ret = dwc3_send_clear_stall_ep_cmd(dep);
2263 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
2264 DWC3_DEPCMD_CLEARSTALL, &params);
2265 WARN_ON_ONCE(ret); 2283 WARN_ON_ONCE(ret);
2266 } 2284 }
2267} 2285}
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index d67de0d22a2b..eb648485a58c 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1868,14 +1868,19 @@ unknown:
1868 } 1868 }
1869 break; 1869 break;
1870 } 1870 }
1871 req->length = value; 1871
1872 req->context = cdev; 1872 if (value >= 0) {
1873 req->zero = value < w_length; 1873 req->length = value;
1874 value = composite_ep0_queue(cdev, req, GFP_ATOMIC); 1874 req->context = cdev;
1875 if (value < 0) { 1875 req->zero = value < w_length;
1876 DBG(cdev, "ep_queue --> %d\n", value); 1876 value = composite_ep0_queue(cdev, req,
1877 req->status = 0; 1877 GFP_ATOMIC);
1878 composite_setup_complete(gadget->ep0, req); 1878 if (value < 0) {
1879 DBG(cdev, "ep_queue --> %d\n", value);
1880 req->status = 0;
1881 composite_setup_complete(gadget->ep0,
1882 req);
1883 }
1879 } 1884 }
1880 return value; 1885 return value;
1881 } 1886 }
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index b6f60ca8a035..70cf3477f951 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1401,6 +1401,7 @@ static const struct usb_gadget_driver configfs_driver_template = {
1401 .owner = THIS_MODULE, 1401 .owner = THIS_MODULE,
1402 .name = "configfs-gadget", 1402 .name = "configfs-gadget",
1403 }, 1403 },
1404 .match_existing_only = 1,
1404}; 1405};
1405 1406
1406static struct config_group *gadgets_make( 1407static struct config_group *gadgets_make(
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 73515d54e1cc..cc33d2667408 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -2051,7 +2051,7 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
2051 2051
2052 if (len < sizeof(*d) || 2052 if (len < sizeof(*d) ||
2053 d->bFirstInterfaceNumber >= ffs->interfaces_count || 2053 d->bFirstInterfaceNumber >= ffs->interfaces_count ||
2054 d->Reserved1) 2054 !d->Reserved1)
2055 return -EINVAL; 2055 return -EINVAL;
2056 for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i) 2056 for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
2057 if (d->Reserved2[i]) 2057 if (d->Reserved2[i])
@@ -2729,6 +2729,7 @@ static int _ffs_func_bind(struct usb_configuration *c,
2729 func->ffs->ss_descs_count; 2729 func->ffs->ss_descs_count;
2730 2730
2731 int fs_len, hs_len, ss_len, ret, i; 2731 int fs_len, hs_len, ss_len, ret, i;
2732 struct ffs_ep *eps_ptr;
2732 2733
2733 /* Make it a single chunk, less management later on */ 2734 /* Make it a single chunk, less management later on */
2734 vla_group(d); 2735 vla_group(d);
@@ -2777,12 +2778,9 @@ static int _ffs_func_bind(struct usb_configuration *c,
2777 ffs->raw_descs_length); 2778 ffs->raw_descs_length);
2778 2779
2779 memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz); 2780 memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz);
2780 for (ret = ffs->eps_count; ret; --ret) { 2781 eps_ptr = vla_ptr(vlabuf, d, eps);
2781 struct ffs_ep *ptr; 2782 for (i = 0; i < ffs->eps_count; i++)
2782 2783 eps_ptr[i].num = -1;
2783 ptr = vla_ptr(vlabuf, d, eps);
2784 ptr[ret].num = -1;
2785 }
2786 2784
2787 /* Save pointers 2785 /* Save pointers
2788 * d_eps == vlabuf, func->eps used to kfree vlabuf later 2786 * d_eps == vlabuf, func->eps used to kfree vlabuf later
@@ -2851,7 +2849,7 @@ static int _ffs_func_bind(struct usb_configuration *c,
2851 goto error; 2849 goto error;
2852 2850
2853 func->function.os_desc_table = vla_ptr(vlabuf, d, os_desc_table); 2851 func->function.os_desc_table = vla_ptr(vlabuf, d, os_desc_table);
2854 if (c->cdev->use_os_string) 2852 if (c->cdev->use_os_string) {
2855 for (i = 0; i < ffs->interfaces_count; ++i) { 2853 for (i = 0; i < ffs->interfaces_count; ++i) {
2856 struct usb_os_desc *desc; 2854 struct usb_os_desc *desc;
2857 2855
@@ -2862,13 +2860,15 @@ static int _ffs_func_bind(struct usb_configuration *c,
2862 vla_ptr(vlabuf, d, ext_compat) + i * 16; 2860 vla_ptr(vlabuf, d, ext_compat) + i * 16;
2863 INIT_LIST_HEAD(&desc->ext_prop); 2861 INIT_LIST_HEAD(&desc->ext_prop);
2864 } 2862 }
2865 ret = ffs_do_os_descs(ffs->ms_os_descs_count, 2863 ret = ffs_do_os_descs(ffs->ms_os_descs_count,
2866 vla_ptr(vlabuf, d, raw_descs) + 2864 vla_ptr(vlabuf, d, raw_descs) +
2867 fs_len + hs_len + ss_len, 2865 fs_len + hs_len + ss_len,
2868 d_raw_descs__sz - fs_len - hs_len - ss_len, 2866 d_raw_descs__sz - fs_len - hs_len -
2869 __ffs_func_bind_do_os_desc, func); 2867 ss_len,
2870 if (unlikely(ret < 0)) 2868 __ffs_func_bind_do_os_desc, func);
2871 goto error; 2869 if (unlikely(ret < 0))
2870 goto error;
2871 }
2872 func->function.os_desc_n = 2872 func->function.os_desc_n =
2873 c->cdev->use_os_string ? ffs->interfaces_count : 0; 2873 c->cdev->use_os_string ? ffs->interfaces_count : 0;
2874 2874
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index c45104e3a64b..64706a789580 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -161,14 +161,6 @@ static struct usb_endpoint_descriptor hs_ep_out_desc = {
161 .wMaxPacketSize = cpu_to_le16(512) 161 .wMaxPacketSize = cpu_to_le16(512)
162}; 162};
163 163
164static struct usb_qualifier_descriptor dev_qualifier = {
165 .bLength = sizeof(dev_qualifier),
166 .bDescriptorType = USB_DT_DEVICE_QUALIFIER,
167 .bcdUSB = cpu_to_le16(0x0200),
168 .bDeviceClass = USB_CLASS_PRINTER,
169 .bNumConfigurations = 1
170};
171
172static struct usb_descriptor_header *hs_printer_function[] = { 164static struct usb_descriptor_header *hs_printer_function[] = {
173 (struct usb_descriptor_header *) &intf_desc, 165 (struct usb_descriptor_header *) &intf_desc,
174 (struct usb_descriptor_header *) &hs_ep_in_desc, 166 (struct usb_descriptor_header *) &hs_ep_in_desc,
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index 35fe3c80cfc0..197f73386fac 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -1445,16 +1445,18 @@ static void usbg_drop_tpg(struct se_portal_group *se_tpg)
1445 for (i = 0; i < TPG_INSTANCES; ++i) 1445 for (i = 0; i < TPG_INSTANCES; ++i)
1446 if (tpg_instances[i].tpg == tpg) 1446 if (tpg_instances[i].tpg == tpg)
1447 break; 1447 break;
1448 if (i < TPG_INSTANCES) 1448 if (i < TPG_INSTANCES) {
1449 tpg_instances[i].tpg = NULL; 1449 tpg_instances[i].tpg = NULL;
1450 opts = container_of(tpg_instances[i].func_inst, 1450 opts = container_of(tpg_instances[i].func_inst,
1451 struct f_tcm_opts, func_inst); 1451 struct f_tcm_opts, func_inst);
1452 mutex_lock(&opts->dep_lock); 1452 mutex_lock(&opts->dep_lock);
1453 if (opts->has_dep) 1453 if (opts->has_dep)
1454 module_put(opts->dependent); 1454 module_put(opts->dependent);
1455 else 1455 else
1456 configfs_undepend_item_unlocked(&opts->func_inst.group.cg_item); 1456 configfs_undepend_item_unlocked(
1457 mutex_unlock(&opts->dep_lock); 1457 &opts->func_inst.group.cg_item);
1458 mutex_unlock(&opts->dep_lock);
1459 }
1458 mutex_unlock(&tpg_instances_lock); 1460 mutex_unlock(&tpg_instances_lock);
1459 1461
1460 kfree(tpg); 1462 kfree(tpg);
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 186d4b162524..cd214ec8a601 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -598,18 +598,6 @@ static struct usb_gadget_strings *fn_strings[] = {
598 NULL, 598 NULL,
599}; 599};
600 600
601static struct usb_qualifier_descriptor devqual_desc = {
602 .bLength = sizeof devqual_desc,
603 .bDescriptorType = USB_DT_DEVICE_QUALIFIER,
604
605 .bcdUSB = cpu_to_le16(0x200),
606 .bDeviceClass = USB_CLASS_MISC,
607 .bDeviceSubClass = 0x02,
608 .bDeviceProtocol = 0x01,
609 .bNumConfigurations = 1,
610 .bRESERVED = 0,
611};
612
613static struct usb_interface_assoc_descriptor iad_desc = { 601static struct usb_interface_assoc_descriptor iad_desc = {
614 .bLength = sizeof iad_desc, 602 .bLength = sizeof iad_desc,
615 .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION, 603 .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
@@ -1292,6 +1280,7 @@ in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
1292 1280
1293 if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) { 1281 if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) {
1294 struct cntrl_cur_lay3 c; 1282 struct cntrl_cur_lay3 c;
1283 memset(&c, 0, sizeof(struct cntrl_cur_lay3));
1295 1284
1296 if (entity_id == USB_IN_CLK_ID) 1285 if (entity_id == USB_IN_CLK_ID)
1297 c.dCUR = p_srate; 1286 c.dCUR = p_srate;
diff --git a/drivers/usb/gadget/function/storage_common.c b/drivers/usb/gadget/function/storage_common.c
index d62683017cf3..990df221c629 100644
--- a/drivers/usb/gadget/function/storage_common.c
+++ b/drivers/usb/gadget/function/storage_common.c
@@ -83,9 +83,7 @@ EXPORT_SYMBOL_GPL(fsg_fs_function);
83 * USB 2.0 devices need to expose both high speed and full speed 83 * USB 2.0 devices need to expose both high speed and full speed
84 * descriptors, unless they only run at full speed. 84 * descriptors, unless they only run at full speed.
85 * 85 *
86 * That means alternate endpoint descriptors (bigger packets) 86 * That means alternate endpoint descriptors (bigger packets).
87 * and a "device qualifier" ... plus more construction options
88 * for the configuration descriptor.
89 */ 87 */
90struct usb_endpoint_descriptor fsg_hs_bulk_in_desc = { 88struct usb_endpoint_descriptor fsg_hs_bulk_in_desc = {
91 .bLength = USB_DT_ENDPOINT_SIZE, 89 .bLength = USB_DT_ENDPOINT_SIZE,
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index e64479f882a5..aa3707bdebb4 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -938,8 +938,11 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
938 struct usb_ep *ep = dev->gadget->ep0; 938 struct usb_ep *ep = dev->gadget->ep0;
939 struct usb_request *req = dev->req; 939 struct usb_request *req = dev->req;
940 940
941 if ((retval = setup_req (ep, req, 0)) == 0) 941 if ((retval = setup_req (ep, req, 0)) == 0) {
942 retval = usb_ep_queue (ep, req, GFP_ATOMIC); 942 spin_unlock_irq (&dev->lock);
943 retval = usb_ep_queue (ep, req, GFP_KERNEL);
944 spin_lock_irq (&dev->lock);
945 }
943 dev->state = STATE_DEV_CONNECTED; 946 dev->state = STATE_DEV_CONNECTED;
944 947
945 /* assume that was SET_CONFIGURATION */ 948 /* assume that was SET_CONFIGURATION */
@@ -1457,8 +1460,11 @@ delegate:
1457 w_length); 1460 w_length);
1458 if (value < 0) 1461 if (value < 0)
1459 break; 1462 break;
1463
1464 spin_unlock (&dev->lock);
1460 value = usb_ep_queue (gadget->ep0, dev->req, 1465 value = usb_ep_queue (gadget->ep0, dev->req,
1461 GFP_ATOMIC); 1466 GFP_KERNEL);
1467 spin_lock (&dev->lock);
1462 if (value < 0) { 1468 if (value < 0) {
1463 clean_req (gadget->ep0, dev->req); 1469 clean_req (gadget->ep0, dev->req);
1464 break; 1470 break;
@@ -1481,11 +1487,14 @@ delegate:
1481 if (value >= 0 && dev->state != STATE_DEV_SETUP) { 1487 if (value >= 0 && dev->state != STATE_DEV_SETUP) {
1482 req->length = value; 1488 req->length = value;
1483 req->zero = value < w_length; 1489 req->zero = value < w_length;
1484 value = usb_ep_queue (gadget->ep0, req, GFP_ATOMIC); 1490
1491 spin_unlock (&dev->lock);
1492 value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL);
1485 if (value < 0) { 1493 if (value < 0) {
1486 DBG (dev, "ep_queue --> %d\n", value); 1494 DBG (dev, "ep_queue --> %d\n", value);
1487 req->status = 0; 1495 req->status = 0;
1488 } 1496 }
1497 return value;
1489 } 1498 }
1490 1499
1491 /* device stalls when value < 0 */ 1500 /* device stalls when value < 0 */
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
index 6e8300d6a737..e1b2dcebdc2e 100644
--- a/drivers/usb/gadget/udc/udc-core.c
+++ b/drivers/usb/gadget/udc/udc-core.c
@@ -603,11 +603,15 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver)
603 } 603 }
604 } 604 }
605 605
606 list_add_tail(&driver->pending, &gadget_driver_pending_list); 606 if (!driver->match_existing_only) {
607 pr_info("udc-core: couldn't find an available UDC - added [%s] to list of pending drivers\n", 607 list_add_tail(&driver->pending, &gadget_driver_pending_list);
608 driver->function); 608 pr_info("udc-core: couldn't find an available UDC - added [%s] to list of pending drivers\n",
609 driver->function);
610 ret = 0;
611 }
612
609 mutex_unlock(&udc_lock); 613 mutex_unlock(&udc_lock);
610 return 0; 614 return ret;
611found: 615found:
612 ret = udc_bind_to_driver(udc, driver); 616 ret = udc_bind_to_driver(udc, driver);
613 mutex_unlock(&udc_lock); 617 mutex_unlock(&udc_lock);
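The configfs change above is the first user of the flag: with match_existing_only set, usb_gadget_probe_driver() fails with the usual not-found error instead of parking the driver on gadget_driver_pending_list. A driver opting in would look like this sketch (every name except match_existing_only is hypothetical):

static struct usb_gadget_driver my_gadget_driver = {
	.function		= "my-gadget",
	.max_speed		= USB_SPEED_HIGH,
	.bind			= my_bind,
	.unbind			= my_unbind,
	.setup			= my_setup,
	.driver	= {
		.owner		= THIS_MODULE,
		.name		= "my-gadget",
	},
	.match_existing_only	= 1,	/* no deferred binding */
};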
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index ae1b6e69eb96..a962b89b65a6 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -368,6 +368,15 @@ static void ehci_shutdown(struct usb_hcd *hcd)
368{ 368{
369 struct ehci_hcd *ehci = hcd_to_ehci(hcd); 369 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
370 370
371 /*
372 * Protect the system from crashing at system shutdown in cases where
373 * the USB host has not yet been added by the OTG controller driver.
374 * As ehci_setup() has not run yet, avoid accessing registers or
375 * variables initialized in ehci_setup().
376 */
377 if (!ehci->sbrn)
378 return;
379
371 spin_lock_irq(&ehci->lock); 380 spin_lock_irq(&ehci->lock);
372 ehci->shutdown = true; 381 ehci->shutdown = true;
373 ehci->rh_state = EHCI_RH_STOPPING; 382 ehci->rh_state = EHCI_RH_STOPPING;
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index ffc90295a95f..74f62d68f013 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -872,15 +872,23 @@ int ehci_hub_control(
872) { 872) {
873 struct ehci_hcd *ehci = hcd_to_ehci (hcd); 873 struct ehci_hcd *ehci = hcd_to_ehci (hcd);
874 int ports = HCS_N_PORTS (ehci->hcs_params); 874 int ports = HCS_N_PORTS (ehci->hcs_params);
875 u32 __iomem *status_reg = &ehci->regs->port_status[ 875 u32 __iomem *status_reg, *hostpc_reg;
876 (wIndex & 0xff) - 1];
877 u32 __iomem *hostpc_reg = &ehci->regs->hostpc[(wIndex & 0xff) - 1];
878 u32 temp, temp1, status; 876 u32 temp, temp1, status;
879 unsigned long flags; 877 unsigned long flags;
880 int retval = 0; 878 int retval = 0;
881 unsigned selector; 879 unsigned selector;
882 880
883 /* 881 /*
882 * Avoid underflow while calculating (wIndex & 0xff) - 1.
883 * The compiler might deduce that wIndex can never be 0 and then
884 * optimize away the tests for !wIndex below.
885 */
886 temp = wIndex & 0xff;
887 temp -= (temp > 0);
888 status_reg = &ehci->regs->port_status[temp];
889 hostpc_reg = &ehci->regs->hostpc[temp];
890
891 /*
884 * FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR. 892 * FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR.
885 * HCS_INDICATOR may say we can change LEDs to off/amber/green. 893 * HCS_INDICATOR may say we can change LEDs to off/amber/green.
886 * (track current state ourselves) ... blink for diagnostics, 894 * (track current state ourselves) ... blink for diagnostics,
diff --git a/drivers/usb/host/ehci-msm.c b/drivers/usb/host/ehci-msm.c
index d3afc89d00f5..2f8d3af811ce 100644
--- a/drivers/usb/host/ehci-msm.c
+++ b/drivers/usb/host/ehci-msm.c
@@ -179,22 +179,32 @@ static int ehci_msm_remove(struct platform_device *pdev)
179static int ehci_msm_pm_suspend(struct device *dev) 179static int ehci_msm_pm_suspend(struct device *dev)
180{ 180{
181 struct usb_hcd *hcd = dev_get_drvdata(dev); 181 struct usb_hcd *hcd = dev_get_drvdata(dev);
182 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
182 bool do_wakeup = device_may_wakeup(dev); 183 bool do_wakeup = device_may_wakeup(dev);
183 184
184 dev_dbg(dev, "ehci-msm PM suspend\n"); 185 dev_dbg(dev, "ehci-msm PM suspend\n");
185 186
186 return ehci_suspend(hcd, do_wakeup); 187 /* Only call ehci_suspend if ehci_setup has been done */
188 if (ehci->sbrn)
189 return ehci_suspend(hcd, do_wakeup);
190
191 return 0;
187} 192}
188 193
189static int ehci_msm_pm_resume(struct device *dev) 194static int ehci_msm_pm_resume(struct device *dev)
190{ 195{
191 struct usb_hcd *hcd = dev_get_drvdata(dev); 196 struct usb_hcd *hcd = dev_get_drvdata(dev);
197 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
192 198
193 dev_dbg(dev, "ehci-msm PM resume\n"); 199 dev_dbg(dev, "ehci-msm PM resume\n");
194 ehci_resume(hcd, false); 200
201 /* Only call ehci_resume if ehci_setup has been done */
202 if (ehci->sbrn)
203 ehci_resume(hcd, false);
195 204
196 return 0; 205 return 0;
197} 206}
207
198#else 208#else
199#define ehci_msm_pm_suspend NULL 209#define ehci_msm_pm_suspend NULL
200#define ehci_msm_pm_resume NULL 210#define ehci_msm_pm_resume NULL
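Both the shutdown guard and the MSM suspend/resume guards key off the same sentinel: ehci->sbrn is zero from allocation until ehci_setup() assigns the Serial Bus Release Number, so a non-zero sbrn means the register state those paths touch has been initialized. The test could be factored into a predicate; a sketch under that single assumption:

/* True once ehci_setup() has completed for this controller. */
static inline bool ehci_setup_done(const struct ehci_hcd *ehci)
{
	return ehci->sbrn != 0;
}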
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index 4031b372008e..9a3d7db5be57 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -81,15 +81,23 @@ static int tegra_reset_usb_controller(struct platform_device *pdev)
81 struct usb_hcd *hcd = platform_get_drvdata(pdev); 81 struct usb_hcd *hcd = platform_get_drvdata(pdev);
82 struct tegra_ehci_hcd *tegra = 82 struct tegra_ehci_hcd *tegra =
83 (struct tegra_ehci_hcd *)hcd_to_ehci(hcd)->priv; 83 (struct tegra_ehci_hcd *)hcd_to_ehci(hcd)->priv;
84 bool has_utmi_pad_registers = false;
84 85
85 phy_np = of_parse_phandle(pdev->dev.of_node, "nvidia,phy", 0); 86 phy_np = of_parse_phandle(pdev->dev.of_node, "nvidia,phy", 0);
86 if (!phy_np) 87 if (!phy_np)
87 return -ENOENT; 88 return -ENOENT;
88 89
90 if (of_property_read_bool(phy_np, "nvidia,has-utmi-pad-registers"))
91 has_utmi_pad_registers = true;
92
89 if (!usb1_reset_attempted) { 93 if (!usb1_reset_attempted) {
90 struct reset_control *usb1_reset; 94 struct reset_control *usb1_reset;
91 95
92 usb1_reset = of_reset_control_get(phy_np, "usb"); 96 if (!has_utmi_pad_registers)
97 usb1_reset = of_reset_control_get(phy_np, "utmi-pads");
98 else
99 usb1_reset = tegra->rst;
100
93 if (IS_ERR(usb1_reset)) { 101 if (IS_ERR(usb1_reset)) {
94 dev_warn(&pdev->dev, 102 dev_warn(&pdev->dev,
95 "can't get utmi-pads reset from the PHY\n"); 103 "can't get utmi-pads reset from the PHY\n");
@@ -99,13 +107,15 @@ static int tegra_reset_usb_controller(struct platform_device *pdev)
99 reset_control_assert(usb1_reset); 107 reset_control_assert(usb1_reset);
100 udelay(1); 108 udelay(1);
101 reset_control_deassert(usb1_reset); 109 reset_control_deassert(usb1_reset);
110
111 if (!has_utmi_pad_registers)
112 reset_control_put(usb1_reset);
102 } 113 }
103 114
104 reset_control_put(usb1_reset);
105 usb1_reset_attempted = true; 115 usb1_reset_attempted = true;
106 } 116 }
107 117
108 if (!of_property_read_bool(phy_np, "nvidia,has-utmi-pad-registers")) { 118 if (!has_utmi_pad_registers) {
109 reset_control_assert(tegra->rst); 119 reset_control_assert(tegra->rst);
110 udelay(1); 120 udelay(1);
111 reset_control_deassert(tegra->rst); 121 reset_control_deassert(tegra->rst);
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index d029bbe9eb36..641fed609911 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -183,7 +183,6 @@ static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
183{ 183{
184 int branch; 184 int branch;
185 185
186 ed->state = ED_OPER;
187 ed->ed_prev = NULL; 186 ed->ed_prev = NULL;
188 ed->ed_next = NULL; 187 ed->ed_next = NULL;
189 ed->hwNextED = 0; 188 ed->hwNextED = 0;
@@ -259,6 +258,8 @@ static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
259 /* the HC may not see the schedule updates yet, but if it does 258 /* the HC may not see the schedule updates yet, but if it does
260 * then they'll be properly ordered. 259 * then they'll be properly ordered.
261 */ 260 */
261
262 ed->state = ED_OPER;
262 return 0; 263 return 0;
263} 264}
264 265
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 48672fac7ff3..c10972fcc8e4 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -37,6 +37,7 @@
37/* Device for a quirk */ 37/* Device for a quirk */
38#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73 38#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73
39#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000 39#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
40#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1009 0x1009
40#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1400 0x1400 41#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1400 0x1400
41 42
42#define PCI_VENDOR_ID_ETRON 0x1b6f 43#define PCI_VENDOR_ID_ETRON 0x1b6f
@@ -114,6 +115,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
114 xhci->quirks |= XHCI_TRUST_TX_LENGTH; 115 xhci->quirks |= XHCI_TRUST_TX_LENGTH;
115 } 116 }
116 117
118 if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
119 pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1009)
120 xhci->quirks |= XHCI_BROKEN_STREAMS;
121
117 if (pdev->vendor == PCI_VENDOR_ID_NEC) 122 if (pdev->vendor == PCI_VENDOR_ID_NEC)
118 xhci->quirks |= XHCI_NEC_HOST; 123 xhci->quirks |= XHCI_NEC_HOST;
119 124
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 676ea458148b..1f3f981fe7f8 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -196,6 +196,9 @@ static int xhci_plat_probe(struct platform_device *pdev)
196 ret = clk_prepare_enable(clk); 196 ret = clk_prepare_enable(clk);
197 if (ret) 197 if (ret)
198 goto put_hcd; 198 goto put_hcd;
199 } else if (PTR_ERR(clk) == -EPROBE_DEFER) {
200 ret = -EPROBE_DEFER;
201 goto put_hcd;
199 } 202 }
200 203
201 xhci = hcd_to_xhci(hcd); 204 xhci = hcd_to_xhci(hcd);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 52deae4b7eac..d7d502578d79 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -290,6 +290,14 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
290 290
291 temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); 291 temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
292 xhci->cmd_ring_state = CMD_RING_STATE_ABORTED; 292 xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
293
294 /*
295 * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
296 * however, on some host hardware the CMD_RING_RUNNING bit is correctly
297 * cleared but the completion event is never sent. Use the cmd timeout
298 * timer to handle those cases, with twice the time to cover the bit polling retry.
299 */
300 mod_timer(&xhci->cmd_timer, jiffies + (2 * XHCI_CMD_DEFAULT_TIMEOUT));
293 xhci_write_64(xhci, temp_64 | CMD_RING_ABORT, 301 xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
294 &xhci->op_regs->cmd_ring); 302 &xhci->op_regs->cmd_ring);
295 303
@@ -314,6 +322,7 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
314 322
315 xhci_err(xhci, "Stopped the command ring failed, " 323 xhci_err(xhci, "Stopped the command ring failed, "
316 "maybe the host is dead\n"); 324 "maybe the host is dead\n");
325 del_timer(&xhci->cmd_timer);
317 xhci->xhc_state |= XHCI_STATE_DYING; 326 xhci->xhc_state |= XHCI_STATE_DYING;
318 xhci_quiesce(xhci); 327 xhci_quiesce(xhci);
319 xhci_halt(xhci); 328 xhci_halt(xhci);
@@ -1246,22 +1255,21 @@ void xhci_handle_command_timeout(unsigned long data)
1246 int ret; 1255 int ret;
1247 unsigned long flags; 1256 unsigned long flags;
1248 u64 hw_ring_state; 1257 u64 hw_ring_state;
1249 struct xhci_command *cur_cmd = NULL; 1258 bool second_timeout = false;
1250 xhci = (struct xhci_hcd *) data; 1259 xhci = (struct xhci_hcd *) data;
1251 1260
1252 /* mark this command to be cancelled */ 1261 /* mark this command to be cancelled */
1253 spin_lock_irqsave(&xhci->lock, flags); 1262 spin_lock_irqsave(&xhci->lock, flags);
1254 if (xhci->current_cmd) { 1263 if (xhci->current_cmd) {
1255 cur_cmd = xhci->current_cmd; 1264 if (xhci->current_cmd->status == COMP_CMD_ABORT)
1256 cur_cmd->status = COMP_CMD_ABORT; 1265 second_timeout = true;
1266 xhci->current_cmd->status = COMP_CMD_ABORT;
1257 } 1267 }
1258 1268
1259
1260 /* Make sure command ring is running before aborting it */ 1269 /* Make sure command ring is running before aborting it */
1261 hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); 1270 hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
1262 if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) && 1271 if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
1263 (hw_ring_state & CMD_RING_RUNNING)) { 1272 (hw_ring_state & CMD_RING_RUNNING)) {
1264
1265 spin_unlock_irqrestore(&xhci->lock, flags); 1273 spin_unlock_irqrestore(&xhci->lock, flags);
1266 xhci_dbg(xhci, "Command timeout\n"); 1274 xhci_dbg(xhci, "Command timeout\n");
1267 ret = xhci_abort_cmd_ring(xhci); 1275 ret = xhci_abort_cmd_ring(xhci);
@@ -1273,6 +1281,15 @@ void xhci_handle_command_timeout(unsigned long data)
1273 } 1281 }
1274 return; 1282 return;
1275 } 1283 }
1284
1285 /* command ring failed to restart, or host removed. Bail out */
1286 if (second_timeout || xhci->xhc_state & XHCI_STATE_REMOVING) {
1287 spin_unlock_irqrestore(&xhci->lock, flags);
1288 xhci_dbg(xhci, "command timed out twice, ring start fail?\n");
1289 xhci_cleanup_command_queue(xhci);
1290 return;
1291 }
1292
1276 /* command timeout on stopped ring, ring can't be aborted */ 1293 /* command timeout on stopped ring, ring can't be aborted */
1277 xhci_dbg(xhci, "Command timeout on stopped ring\n"); 1294 xhci_dbg(xhci, "Command timeout on stopped ring\n");
1278 xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd); 1295 xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
@@ -2721,7 +2738,8 @@ hw_died:
2721 writel(irq_pending, &xhci->ir_set->irq_pending); 2738 writel(irq_pending, &xhci->ir_set->irq_pending);
2722 } 2739 }
2723 2740
2724 if (xhci->xhc_state & XHCI_STATE_DYING) { 2741 if (xhci->xhc_state & XHCI_STATE_DYING ||
2742 xhci->xhc_state & XHCI_STATE_HALTED) {
2725 xhci_dbg(xhci, "xHCI dying, ignoring interrupt. " 2743 xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
2726 "Shouldn't IRQs be disabled?\n"); 2744 "Shouldn't IRQs be disabled?\n");
2727 /* Clear the event handler busy flag (RW1C); 2745 /* Clear the event handler busy flag (RW1C);
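Taken together, the xhci-ring.c changes turn command aborts into a two-strike scheme; the sequence they implement, sketched as a timeline:

/*
 * t0: command queued, cmd_timer armed with XHCI_CMD_DEFAULT_TIMEOUT
 * t1: timer fires -> current_cmd->status = COMP_CMD_ABORT and
 *     xhci_abort_cmd_ring() re-arms cmd_timer for twice the timeout
 *     before writing CMD_RING_ABORT
 * t2: abort completion event arrives -> normal command completion path
 * t2': no event (quirky hardware) -> timer fires again, status is
 *     already COMP_CMD_ABORT, so second_timeout is set and
 *     xhci_cleanup_command_queue() bails out instead of re-aborting
 */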
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index fa7e1ef36cd9..f2f9518c53ab 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -685,20 +685,23 @@ void xhci_stop(struct usb_hcd *hcd)
685 u32 temp; 685 u32 temp;
686 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 686 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
687 687
688 if (xhci->xhc_state & XHCI_STATE_HALTED)
689 return;
690
691 mutex_lock(&xhci->mutex); 688 mutex_lock(&xhci->mutex);
692 spin_lock_irq(&xhci->lock);
693 xhci->xhc_state |= XHCI_STATE_HALTED;
694 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
695 689
696 /* Make sure the xHC is halted for a USB3 roothub 690 if (!(xhci->xhc_state & XHCI_STATE_HALTED)) {
697 * (xhci_stop() could be called as part of failed init). 691 spin_lock_irq(&xhci->lock);
698 */ 692
699 xhci_halt(xhci); 693 xhci->xhc_state |= XHCI_STATE_HALTED;
700 xhci_reset(xhci); 694 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
701 spin_unlock_irq(&xhci->lock); 695 xhci_halt(xhci);
696 xhci_reset(xhci);
697
698 spin_unlock_irq(&xhci->lock);
699 }
700
701 if (!usb_hcd_is_primary_hcd(hcd)) {
702 mutex_unlock(&xhci->mutex);
703 return;
704 }
702 705
703 xhci_cleanup_msix(xhci); 706 xhci_cleanup_msix(xhci);
704 707
@@ -4886,7 +4889,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
4886 xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2); 4889 xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);
4887 xhci_print_registers(xhci); 4890 xhci_print_registers(xhci);
4888 4891
4889 xhci->quirks = quirks; 4892 xhci->quirks |= quirks;
4890 4893
4891 get_quirks(dev, xhci); 4894 get_quirks(dev, xhci);
4892 4895
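The one-character change from '=' to '|=' lets quirk bits placed on the xhci struct before xhci_gen_setup() runs survive; previously the plain assignment discarded them in favor of the module-parameter quirks alone. A sketch of the pattern this enables (my_get_quirks is hypothetical):

struct xhci_hcd *xhci = hcd_to_xhci(hcd);
int ret;

xhci->quirks |= XHCI_BROKEN_STREAMS;		/* set before gen_setup */
ret = xhci_gen_setup(hcd, my_get_quirks);	/* merged, not clobbered */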
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 39fd95833eb8..f824336def5c 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1090,29 +1090,6 @@ void musb_stop(struct musb *musb)
1090 musb_platform_try_idle(musb, 0); 1090 musb_platform_try_idle(musb, 0);
1091} 1091}
1092 1092
1093static void musb_shutdown(struct platform_device *pdev)
1094{
1095 struct musb *musb = dev_to_musb(&pdev->dev);
1096 unsigned long flags;
1097
1098 pm_runtime_get_sync(musb->controller);
1099
1100 musb_host_cleanup(musb);
1101 musb_gadget_cleanup(musb);
1102
1103 spin_lock_irqsave(&musb->lock, flags);
1104 musb_platform_disable(musb);
1105 musb_generic_disable(musb);
1106 spin_unlock_irqrestore(&musb->lock, flags);
1107
1108 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
1109 musb_platform_exit(musb);
1110
1111 pm_runtime_put(musb->controller);
1112 /* FIXME power down */
1113}
1114
1115
1116/*-------------------------------------------------------------------------*/ 1093/*-------------------------------------------------------------------------*/
1117 1094
1118/* 1095/*
@@ -1702,7 +1679,7 @@ EXPORT_SYMBOL_GPL(musb_dma_completion);
1702#define use_dma 0 1679#define use_dma 0
1703#endif 1680#endif
1704 1681
1705static void (*musb_phy_callback)(enum musb_vbus_id_status status); 1682static int (*musb_phy_callback)(enum musb_vbus_id_status status);
1706 1683
1707/* 1684/*
1708 * musb_mailbox - optional phy notifier function 1685 * musb_mailbox - optional phy notifier function
@@ -1711,11 +1688,12 @@ static void (*musb_phy_callback)(enum musb_vbus_id_status status);
1711 * Optionally gets called from the USB PHY. Note that the USB PHY must be 1688 * Optionally gets called from the USB PHY. Note that the USB PHY must be
1712 * disabled at the point the phy_callback is registered or unregistered. 1689 * disabled at the point the phy_callback is registered or unregistered.
1713 */ 1690 */
1714void musb_mailbox(enum musb_vbus_id_status status) 1691int musb_mailbox(enum musb_vbus_id_status status)
1715{ 1692{
1716 if (musb_phy_callback) 1693 if (musb_phy_callback)
1717 musb_phy_callback(status); 1694 return musb_phy_callback(status);
1718 1695
1696 return -ENODEV;
1719}; 1697};
1720EXPORT_SYMBOL_GPL(musb_mailbox); 1698EXPORT_SYMBOL_GPL(musb_mailbox);
1721 1699
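With an int return, the PHY glue can finally tell "musb core not ready yet" apart from success and react instead of silently dropping the event. A caller-side sketch (struct my_phy and its delayed work are hypothetical):

static void my_phy_report_vbus_id(struct my_phy *phy,
				  enum musb_vbus_id_status status)
{
	int err = musb_mailbox(status);

	if (err == -EPROBE_DEFER)	/* core not ready, try again */
		schedule_delayed_work(&phy->id_work, msecs_to_jiffies(100));
	else if (err)
		dev_err(phy->dev, "musb_mailbox failed: %d\n", err);
}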
@@ -2028,11 +2006,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
2028 musb_readl = musb_default_readl; 2006 musb_readl = musb_default_readl;
2029 musb_writel = musb_default_writel; 2007 musb_writel = musb_default_writel;
2030 2008
2031 /* We need musb_read/write functions initialized for PM */
2032 pm_runtime_use_autosuspend(musb->controller);
2033 pm_runtime_set_autosuspend_delay(musb->controller, 200);
2034 pm_runtime_enable(musb->controller);
2035
2036 /* The musb_platform_init() call: 2009 /* The musb_platform_init() call:
2037 * - adjusts musb->mregs 2010 * - adjusts musb->mregs
2038 * - sets the musb->isr 2011 * - sets the musb->isr
@@ -2134,6 +2107,16 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
2134 if (musb->ops->phy_callback) 2107 if (musb->ops->phy_callback)
2135 musb_phy_callback = musb->ops->phy_callback; 2108 musb_phy_callback = musb->ops->phy_callback;
2136 2109
2110 /*
2111 * We need musb_read/write functions initialized for PM.
2112 * Note that at least the 2430 glue needs an autosuspend delay
2113 * somewhere above 300 ms for the hardware to idle properly
2114 * after disconnecting the cable in host mode. Let's use
2115 * 500 ms for some margin.
2116 */
2117 pm_runtime_use_autosuspend(musb->controller);
2118 pm_runtime_set_autosuspend_delay(musb->controller, 500);
2119 pm_runtime_enable(musb->controller);
2137 pm_runtime_get_sync(musb->controller); 2120 pm_runtime_get_sync(musb->controller);
2138 2121
2139 status = usb_phy_init(musb->xceiv); 2122 status = usb_phy_init(musb->xceiv);
@@ -2237,13 +2220,8 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
2237 if (status) 2220 if (status)
2238 goto fail5; 2221 goto fail5;
2239 2222
2240 pm_runtime_put(musb->controller); 2223 pm_runtime_mark_last_busy(musb->controller);
2241 2224 pm_runtime_put_autosuspend(musb->controller);
2242 /*
2243 * For why this is currently needed, see commit 3e43a0725637
2244 * ("usb: musb: core: add pm_runtime_irq_safe()")
2245 */
2246 pm_runtime_irq_safe(musb->controller);
2247 2225
2248 return 0; 2226 return 0;
2249 2227
@@ -2265,7 +2243,9 @@ fail2_5:
2265 usb_phy_shutdown(musb->xceiv); 2243 usb_phy_shutdown(musb->xceiv);
2266 2244
2267err_usb_phy_init: 2245err_usb_phy_init:
2246 pm_runtime_dont_use_autosuspend(musb->controller);
2268 pm_runtime_put_sync(musb->controller); 2247 pm_runtime_put_sync(musb->controller);
2248 pm_runtime_disable(musb->controller);
2269 2249
2270fail2: 2250fail2:
2271 if (musb->irq_wake) 2251 if (musb->irq_wake)
@@ -2273,7 +2253,6 @@ fail2:
2273 musb_platform_exit(musb); 2253 musb_platform_exit(musb);
2274 2254
2275fail1: 2255fail1:
2276 pm_runtime_disable(musb->controller);
2277 dev_err(musb->controller, 2256 dev_err(musb->controller,
2278 "musb_init_controller failed with status %d\n", status); 2257 "musb_init_controller failed with status %d\n", status);
2279 2258
@@ -2312,6 +2291,7 @@ static int musb_remove(struct platform_device *pdev)
2312{ 2291{
2313 struct device *dev = &pdev->dev; 2292 struct device *dev = &pdev->dev;
2314 struct musb *musb = dev_to_musb(dev); 2293 struct musb *musb = dev_to_musb(dev);
2294 unsigned long flags;
2315 2295
2316 /* this gets called on rmmod. 2296 /* this gets called on rmmod.
2317 * - Host mode: host may still be active 2297 * - Host mode: host may still be active
@@ -2319,17 +2299,26 @@ static int musb_remove(struct platform_device *pdev)
2319 * - OTG mode: both roles are deactivated (or never-activated) 2299 * - OTG mode: both roles are deactivated (or never-activated)
2320 */ 2300 */
2321 musb_exit_debugfs(musb); 2301 musb_exit_debugfs(musb);
2322 musb_shutdown(pdev);
2323 musb_phy_callback = NULL;
2324
2325 if (musb->dma_controller)
2326 musb_dma_controller_destroy(musb->dma_controller);
2327
2328 usb_phy_shutdown(musb->xceiv);
2329 2302
2330 cancel_work_sync(&musb->irq_work); 2303 cancel_work_sync(&musb->irq_work);
2331 cancel_delayed_work_sync(&musb->finish_resume_work); 2304 cancel_delayed_work_sync(&musb->finish_resume_work);
2332 cancel_delayed_work_sync(&musb->deassert_reset_work); 2305 cancel_delayed_work_sync(&musb->deassert_reset_work);
2306 pm_runtime_get_sync(musb->controller);
2307 musb_host_cleanup(musb);
2308 musb_gadget_cleanup(musb);
2309 spin_lock_irqsave(&musb->lock, flags);
2310 musb_platform_disable(musb);
2311 musb_generic_disable(musb);
2312 spin_unlock_irqrestore(&musb->lock, flags);
2313 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
2314 pm_runtime_dont_use_autosuspend(musb->controller);
2315 pm_runtime_put_sync(musb->controller);
2316 pm_runtime_disable(musb->controller);
2317 musb_platform_exit(musb);
2318 musb_phy_callback = NULL;
2319 if (musb->dma_controller)
2320 musb_dma_controller_destroy(musb->dma_controller);
2321 usb_phy_shutdown(musb->xceiv);
2333 musb_free(musb); 2322 musb_free(musb);
2334 device_init_wakeup(dev, 0); 2323 device_init_wakeup(dev, 0);
2335 return 0; 2324 return 0;
@@ -2429,7 +2418,8 @@ static void musb_restore_context(struct musb *musb)
2429 musb_writew(musb_base, MUSB_INTRTXE, musb->intrtxe); 2418 musb_writew(musb_base, MUSB_INTRTXE, musb->intrtxe);
2430 musb_writew(musb_base, MUSB_INTRRXE, musb->intrrxe); 2419 musb_writew(musb_base, MUSB_INTRRXE, musb->intrrxe);
2431 musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe); 2420 musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe);
2432 musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl); 2421 if (musb->context.devctl & MUSB_DEVCTL_SESSION)
2422 musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl);
2433 2423
2434 for (i = 0; i < musb->config->num_eps; ++i) { 2424 for (i = 0; i < musb->config->num_eps; ++i) {
2435 struct musb_hw_ep *hw_ep; 2425 struct musb_hw_ep *hw_ep;
@@ -2612,7 +2602,6 @@ static struct platform_driver musb_driver = {
2612 }, 2602 },
2613 .probe = musb_probe, 2603 .probe = musb_probe,
2614 .remove = musb_remove, 2604 .remove = musb_remove,
2615 .shutdown = musb_shutdown,
2616}; 2605};
2617 2606
2618module_platform_driver(musb_driver); 2607module_platform_driver(musb_driver);
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index b6afe9e43305..b55a776b03eb 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -215,7 +215,7 @@ struct musb_platform_ops {
215 dma_addr_t *dma_addr, u32 *len); 215 dma_addr_t *dma_addr, u32 *len);
216 void (*pre_root_reset_end)(struct musb *musb); 216 void (*pre_root_reset_end)(struct musb *musb);
217 void (*post_root_reset_end)(struct musb *musb); 217 void (*post_root_reset_end)(struct musb *musb);
218 void (*phy_callback)(enum musb_vbus_id_status status); 218 int (*phy_callback)(enum musb_vbus_id_status status);
219}; 219};
220 220
221/* 221/*
@@ -312,6 +312,7 @@ struct musb {
312 struct work_struct irq_work; 312 struct work_struct irq_work;
313 struct delayed_work deassert_reset_work; 313 struct delayed_work deassert_reset_work;
314 struct delayed_work finish_resume_work; 314 struct delayed_work finish_resume_work;
315 struct delayed_work gadget_work;
315 u16 hwvers; 316 u16 hwvers;
316 317
317 u16 intrrxe; 318 u16 intrrxe;
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 152865b36522..af2a3a7addf9 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1656,6 +1656,20 @@ static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
1656 return usb_phy_set_power(musb->xceiv, mA); 1656 return usb_phy_set_power(musb->xceiv, mA);
1657} 1657}
1658 1658
1659static void musb_gadget_work(struct work_struct *work)
1660{
1661 struct musb *musb;
1662 unsigned long flags;
1663
1664 musb = container_of(work, struct musb, gadget_work.work);
1665 pm_runtime_get_sync(musb->controller);
1666 spin_lock_irqsave(&musb->lock, flags);
1667 musb_pullup(musb, musb->softconnect);
1668 spin_unlock_irqrestore(&musb->lock, flags);
1669 pm_runtime_mark_last_busy(musb->controller);
1670 pm_runtime_put_autosuspend(musb->controller);
1671}
1672
1659static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on) 1673static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
1660{ 1674{
1661 struct musb *musb = gadget_to_musb(gadget); 1675 struct musb *musb = gadget_to_musb(gadget);
@@ -1663,20 +1677,16 @@ static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
1663 1677
1664 is_on = !!is_on; 1678 is_on = !!is_on;
1665 1679
1666 pm_runtime_get_sync(musb->controller);
1667
1668 /* NOTE: this assumes we are sensing vbus; we'd rather 1680 /* NOTE: this assumes we are sensing vbus; we'd rather
1669 * not pullup unless the B-session is active. 1681 * not pullup unless the B-session is active.
1670 */ 1682 */
1671 spin_lock_irqsave(&musb->lock, flags); 1683 spin_lock_irqsave(&musb->lock, flags);
1672 if (is_on != musb->softconnect) { 1684 if (is_on != musb->softconnect) {
1673 musb->softconnect = is_on; 1685 musb->softconnect = is_on;
1674 musb_pullup(musb, is_on); 1686 schedule_delayed_work(&musb->gadget_work, 0);
1675 } 1687 }
1676 spin_unlock_irqrestore(&musb->lock, flags); 1688 spin_unlock_irqrestore(&musb->lock, flags);
1677 1689
1678 pm_runtime_put(musb->controller);
1679
1680 return 0; 1690 return 0;
1681} 1691}
1682 1692
@@ -1845,7 +1855,7 @@ int musb_gadget_setup(struct musb *musb)
1845#elif IS_ENABLED(CONFIG_USB_MUSB_GADGET) 1855#elif IS_ENABLED(CONFIG_USB_MUSB_GADGET)
1846 musb->g.is_otg = 0; 1856 musb->g.is_otg = 0;
1847#endif 1857#endif
1848 1858 INIT_DELAYED_WORK(&musb->gadget_work, musb_gadget_work);
1849 musb_g_init_endpoints(musb); 1859 musb_g_init_endpoints(musb);
1850 1860
1851 musb->is_active = 0; 1861 musb->is_active = 0;
@@ -1866,6 +1876,8 @@ void musb_gadget_cleanup(struct musb *musb)
1866{ 1876{
1867 if (musb->port_mode == MUSB_PORT_MODE_HOST) 1877 if (musb->port_mode == MUSB_PORT_MODE_HOST)
1868 return; 1878 return;
1879
1880 cancel_delayed_work_sync(&musb->gadget_work);
1869 usb_del_gadget_udc(&musb->g); 1881 usb_del_gadget_udc(&musb->g);
1870} 1882}
1871 1883
@@ -1914,8 +1926,8 @@ static int musb_gadget_start(struct usb_gadget *g,
1914 if (musb->xceiv->last_event == USB_EVENT_ID) 1926 if (musb->xceiv->last_event == USB_EVENT_ID)
1915 musb_platform_set_vbus(musb, 1); 1927 musb_platform_set_vbus(musb, 1);
1916 1928
1917 if (musb->xceiv->last_event == USB_EVENT_NONE) 1929 pm_runtime_mark_last_busy(musb->controller);
1918 pm_runtime_put(musb->controller); 1930 pm_runtime_put_autosuspend(musb->controller);
1919 1931
1920 return 0; 1932 return 0;
1921 1933
@@ -1934,8 +1946,7 @@ static int musb_gadget_stop(struct usb_gadget *g)
1934 struct musb *musb = gadget_to_musb(g); 1946 struct musb *musb = gadget_to_musb(g);
1935 unsigned long flags; 1947 unsigned long flags;
1936 1948
1937 if (musb->xceiv->last_event == USB_EVENT_NONE) 1949 pm_runtime_get_sync(musb->controller);
1938 pm_runtime_get_sync(musb->controller);
1939 1950
1940 /* 1951 /*
1941 * REVISIT always use otg_set_peripheral() here too; 1952 * REVISIT always use otg_set_peripheral() here too;
@@ -1963,7 +1974,8 @@ static int musb_gadget_stop(struct usb_gadget *g)
1963 * that currently misbehaves. 1974 * that currently misbehaves.
1964 */ 1975 */
1965 1976
1966 pm_runtime_put(musb->controller); 1977 pm_runtime_mark_last_busy(musb->controller);
1978 pm_runtime_put_autosuspend(musb->controller);
1967 1979
1968 return 0; 1980 return 0;
1969} 1981}
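The thread running through the gadget changes: pm_runtime_get_sync() can sleep, so it must not be called under musb->lock, and it is now the worker, not the atomic pullup callback, that powers the controller up before touching MUSB_POWER. Condensed (locking as in the diff):

/* atomic context: record intent, wake the worker */
spin_lock_irqsave(&musb->lock, flags);
musb->softconnect = is_on;
schedule_delayed_work(&musb->gadget_work, 0);
spin_unlock_irqrestore(&musb->lock, flags);

/* worker, may sleep: power up, then write the pullup bit under the lock */
pm_runtime_get_sync(musb->controller);
spin_lock_irqsave(&musb->lock, flags);
musb_pullup(musb, musb->softconnect);
spin_unlock_irqrestore(&musb->lock, flags);
pm_runtime_mark_last_busy(musb->controller);
pm_runtime_put_autosuspend(musb->controller);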
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 2f8ad7f1f482..d227a71d85e1 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -434,7 +434,13 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
434 } 434 }
435 } 435 }
436 436
437 if (qh != NULL && qh->is_ready) { 437 /*
438 * The pipe must be broken if the current urb->status is set, so don't
439 * start the next urb.
440 * TODO: to minimize the risk of regression, only check urb->status
441 * for RX, until we have a test case to understand the behavior of TX.
442 */
443 if ((!status || !is_in) && qh && qh->is_ready) {
438 dev_dbg(musb->controller, "... next ep%d %cX urb %p\n", 444 dev_dbg(musb->controller, "... next ep%d %cX urb %p\n",
439 hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh)); 445 hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
440 musb_start_urb(musb, is_in, qh); 446 musb_start_urb(musb, is_in, qh);
@@ -594,14 +600,13 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum)
594 musb_writew(ep->regs, MUSB_TXCSR, 0); 600 musb_writew(ep->regs, MUSB_TXCSR, 0);
595 601
596 /* scrub all previous state, clearing toggle */ 602 /* scrub all previous state, clearing toggle */
597 } else {
598 csr = musb_readw(ep->regs, MUSB_RXCSR);
599 if (csr & MUSB_RXCSR_RXPKTRDY)
600 WARNING("rx%d, packet/%d ready?\n", ep->epnum,
601 musb_readw(ep->regs, MUSB_RXCOUNT));
602
603 musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
604 } 603 }
604 csr = musb_readw(ep->regs, MUSB_RXCSR);
605 if (csr & MUSB_RXCSR_RXPKTRDY)
606 WARNING("rx%d, packet/%d ready?\n", ep->epnum,
607 musb_readw(ep->regs, MUSB_RXCOUNT));
608
609 musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
605 610
606 /* target addr and (for multipoint) hub addr/port */ 611 /* target addr and (for multipoint) hub addr/port */
607 if (musb->is_multipoint) { 612 if (musb->is_multipoint) {
@@ -627,7 +632,7 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum)
627 ep->rx_reinit = 0; 632 ep->rx_reinit = 0;
628} 633}
629 634
630static int musb_tx_dma_set_mode_mentor(struct dma_controller *dma, 635static void musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
631 struct musb_hw_ep *hw_ep, struct musb_qh *qh, 636 struct musb_hw_ep *hw_ep, struct musb_qh *qh,
632 struct urb *urb, u32 offset, 637 struct urb *urb, u32 offset,
633 u32 *length, u8 *mode) 638 u32 *length, u8 *mode)
@@ -664,23 +669,18 @@ static int musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
664 } 669 }
665 channel->desired_mode = *mode; 670 channel->desired_mode = *mode;
666 musb_writew(epio, MUSB_TXCSR, csr); 671 musb_writew(epio, MUSB_TXCSR, csr);
667
668 return 0;
669} 672}
670 673
671static int musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma, 674static void musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma,
672 struct musb_hw_ep *hw_ep, 675 struct musb_hw_ep *hw_ep,
673 struct musb_qh *qh, 676 struct musb_qh *qh,
674 struct urb *urb, 677 struct urb *urb,
675 u32 offset, 678 u32 offset,
676 u32 *length, 679 u32 *length,
677 u8 *mode) 680 u8 *mode)
678{ 681{
679 struct dma_channel *channel = hw_ep->tx_channel; 682 struct dma_channel *channel = hw_ep->tx_channel;
680 683
681 if (!is_cppi_enabled(hw_ep->musb) && !tusb_dma_omap(hw_ep->musb))
682 return -ENODEV;
683
684 channel->actual_len = 0; 684 channel->actual_len = 0;
685 685
686 /* 686 /*
@@ -688,8 +688,6 @@ static int musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma,
688 * to identify the zero-length-final-packet case. 688 * to identify the zero-length-final-packet case.
689 */ 689 */
690 *mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0; 690 *mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
691
692 return 0;
693} 691}
694 692
695static bool musb_tx_dma_program(struct dma_controller *dma, 693static bool musb_tx_dma_program(struct dma_controller *dma,
@@ -699,15 +697,14 @@ static bool musb_tx_dma_program(struct dma_controller *dma,
699 struct dma_channel *channel = hw_ep->tx_channel; 697 struct dma_channel *channel = hw_ep->tx_channel;
700 u16 pkt_size = qh->maxpacket; 698 u16 pkt_size = qh->maxpacket;
701 u8 mode; 699 u8 mode;
702 int res;
703 700
704 if (musb_dma_inventra(hw_ep->musb) || musb_dma_ux500(hw_ep->musb)) 701 if (musb_dma_inventra(hw_ep->musb) || musb_dma_ux500(hw_ep->musb))
705 res = musb_tx_dma_set_mode_mentor(dma, hw_ep, qh, urb, 702 musb_tx_dma_set_mode_mentor(dma, hw_ep, qh, urb, offset,
706 offset, &length, &mode); 703 &length, &mode);
704 else if (is_cppi_enabled(hw_ep->musb) || tusb_dma_omap(hw_ep->musb))
705 musb_tx_dma_set_mode_cppi_tusb(dma, hw_ep, qh, urb, offset,
706 &length, &mode);
707 else 707 else
708 res = musb_tx_dma_set_mode_cppi_tusb(dma, hw_ep, qh, urb,
709 offset, &length, &mode);
710 if (res)
711 return false; 708 return false;
712 709
713 qh->segsize = length; 710 qh->segsize = length;
@@ -995,9 +992,15 @@ static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
995 if (is_in) { 992 if (is_in) {
996 dma = is_dma_capable() ? ep->rx_channel : NULL; 993 dma = is_dma_capable() ? ep->rx_channel : NULL;
997 994
998 /* clear nak timeout bit */ 995 /*
996 * Need to stop the transaction by clearing REQPKT first,
997 * then the NAK Timeout bit; see the MUSBMHDRC USB 2.0 HIGH-SPEED
998 * DUAL-ROLE CONTROLLER Programmer's Guide, section 9.2.2.
999 */
999 rx_csr = musb_readw(epio, MUSB_RXCSR); 1000 rx_csr = musb_readw(epio, MUSB_RXCSR);
1000 rx_csr |= MUSB_RXCSR_H_WZC_BITS; 1001 rx_csr |= MUSB_RXCSR_H_WZC_BITS;
1002 rx_csr &= ~MUSB_RXCSR_H_REQPKT;
1003 musb_writew(epio, MUSB_RXCSR, rx_csr);
1001 rx_csr &= ~MUSB_RXCSR_DATAERROR; 1004 rx_csr &= ~MUSB_RXCSR_DATAERROR;
1002 musb_writew(epio, MUSB_RXCSR, rx_csr); 1005 musb_writew(epio, MUSB_RXCSR, rx_csr);
1003 1006
@@ -1551,7 +1554,7 @@ static int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
1551 struct urb *urb, 1554 struct urb *urb,
1552 size_t len) 1555 size_t len)
1553{ 1556{
1554 struct dma_channel *channel = hw_ep->tx_channel; 1557 struct dma_channel *channel = hw_ep->rx_channel;
1555 void __iomem *epio = hw_ep->regs; 1558 void __iomem *epio = hw_ep->regs;
1556 dma_addr_t *buf; 1559 dma_addr_t *buf;
1557 u32 length, res; 1560 u32 length, res;
@@ -1870,6 +1873,9 @@ void musb_host_rx(struct musb *musb, u8 epnum)
1870 status = -EPROTO; 1873 status = -EPROTO;
1871 musb_writeb(epio, MUSB_RXINTERVAL, 0); 1874 musb_writeb(epio, MUSB_RXINTERVAL, 0);
1872 1875
1876 rx_csr &= ~MUSB_RXCSR_H_ERROR;
1877 musb_writew(epio, MUSB_RXCSR, rx_csr);
1878
1873 } else if (rx_csr & MUSB_RXCSR_DATAERROR) { 1879 } else if (rx_csr & MUSB_RXCSR_DATAERROR) {
1874 1880
1875 if (USB_ENDPOINT_XFER_ISOC != qh->type) { 1881 if (USB_ENDPOINT_XFER_ISOC != qh->type) {
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index c84e0322c108..0b4cec940386 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -49,97 +49,14 @@ struct omap2430_glue {
49 enum musb_vbus_id_status status; 49 enum musb_vbus_id_status status;
50 struct work_struct omap_musb_mailbox_work; 50 struct work_struct omap_musb_mailbox_work;
51 struct device *control_otghs; 51 struct device *control_otghs;
52 bool cable_connected;
53 bool enabled;
54 bool powered;
52}; 55};
53#define glue_to_musb(g) platform_get_drvdata(g->musb) 56#define glue_to_musb(g) platform_get_drvdata(g->musb)
54 57
55static struct omap2430_glue *_glue; 58static struct omap2430_glue *_glue;
56 59
57static struct timer_list musb_idle_timer;
58
59static void musb_do_idle(unsigned long _musb)
60{
61 struct musb *musb = (void *)_musb;
62 unsigned long flags;
63 u8 power;
64 u8 devctl;
65
66 spin_lock_irqsave(&musb->lock, flags);
67
68 switch (musb->xceiv->otg->state) {
69 case OTG_STATE_A_WAIT_BCON:
70
71 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
72 if (devctl & MUSB_DEVCTL_BDEVICE) {
73 musb->xceiv->otg->state = OTG_STATE_B_IDLE;
74 MUSB_DEV_MODE(musb);
75 } else {
76 musb->xceiv->otg->state = OTG_STATE_A_IDLE;
77 MUSB_HST_MODE(musb);
78 }
79 break;
80 case OTG_STATE_A_SUSPEND:
81 /* finish RESUME signaling? */
82 if (musb->port1_status & MUSB_PORT_STAT_RESUME) {
83 power = musb_readb(musb->mregs, MUSB_POWER);
84 power &= ~MUSB_POWER_RESUME;
85 dev_dbg(musb->controller, "root port resume stopped, power %02x\n", power);
86 musb_writeb(musb->mregs, MUSB_POWER, power);
87 musb->is_active = 1;
88 musb->port1_status &= ~(USB_PORT_STAT_SUSPEND
89 | MUSB_PORT_STAT_RESUME);
90 musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
91 usb_hcd_poll_rh_status(musb->hcd);
92 /* NOTE: it might really be A_WAIT_BCON ... */
93 musb->xceiv->otg->state = OTG_STATE_A_HOST;
94 }
95 break;
96 case OTG_STATE_A_HOST:
97 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
98 if (devctl & MUSB_DEVCTL_BDEVICE)
99 musb->xceiv->otg->state = OTG_STATE_B_IDLE;
100 else
101 musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
102 default:
103 break;
104 }
105 spin_unlock_irqrestore(&musb->lock, flags);
106}
107
108
109static void omap2430_musb_try_idle(struct musb *musb, unsigned long timeout)
110{
111 unsigned long default_timeout = jiffies + msecs_to_jiffies(3);
112 static unsigned long last_timer;
113
114 if (timeout == 0)
115 timeout = default_timeout;
116
117 /* Never idle if active, or when VBUS timeout is not set as host */
118 if (musb->is_active || ((musb->a_wait_bcon == 0)
119 && (musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON))) {
120 dev_dbg(musb->controller, "%s active, deleting timer\n",
121 usb_otg_state_string(musb->xceiv->otg->state));
122 del_timer(&musb_idle_timer);
123 last_timer = jiffies;
124 return;
125 }
126
127 if (time_after(last_timer, timeout)) {
128 if (!timer_pending(&musb_idle_timer))
129 last_timer = timeout;
130 else {
131 dev_dbg(musb->controller, "Longer idle timer already pending, ignoring\n");
132 return;
133 }
134 }
135 last_timer = timeout;
136
137 dev_dbg(musb->controller, "%s inactive, for idle timer for %lu ms\n",
138 usb_otg_state_string(musb->xceiv->otg->state),
139 (unsigned long)jiffies_to_msecs(timeout - jiffies));
140 mod_timer(&musb_idle_timer, timeout);
141}
142
143static void omap2430_musb_set_vbus(struct musb *musb, int is_on) 60static void omap2430_musb_set_vbus(struct musb *musb, int is_on)
144{ 61{
145 struct usb_otg *otg = musb->xceiv->otg; 62 struct usb_otg *otg = musb->xceiv->otg;
@@ -205,16 +122,6 @@ static void omap2430_musb_set_vbus(struct musb *musb, int is_on)
205 musb_readb(musb->mregs, MUSB_DEVCTL)); 122 musb_readb(musb->mregs, MUSB_DEVCTL));
206} 123}
207 124
208static int omap2430_musb_set_mode(struct musb *musb, u8 musb_mode)
209{
210 u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
211
212 devctl |= MUSB_DEVCTL_SESSION;
213 musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
214
215 return 0;
216}
217
218static inline void omap2430_low_level_exit(struct musb *musb) 125static inline void omap2430_low_level_exit(struct musb *musb)
219{ 126{
220 u32 l; 127 u32 l;
@@ -234,22 +141,63 @@ static inline void omap2430_low_level_init(struct musb *musb)
234 musb_writel(musb->mregs, OTG_FORCESTDBY, l); 141 musb_writel(musb->mregs, OTG_FORCESTDBY, l);
235} 142}
236 143
237static void omap2430_musb_mailbox(enum musb_vbus_id_status status) 144/*
145 * We can get multiple cable events so we need to keep track
146 * of the power state. Only keep power enabled if the USB cable is
147 * connected and a gadget has been started.
148 */
149static void omap2430_set_power(struct musb *musb, bool enabled, bool cable)
150{
151 struct device *dev = musb->controller;
152 struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
153 bool power_up;
154 int res;
155
156 if (glue->enabled != enabled)
157 glue->enabled = enabled;
158
159 if (glue->cable_connected != cable)
160 glue->cable_connected = cable;
161
162 power_up = glue->enabled && glue->cable_connected;
163 if (power_up == glue->powered) {
164 dev_warn(musb->controller, "power state already %i\n",
165 power_up);
166 return;
167 }
168
169 glue->powered = power_up;
170
171 if (power_up) {
172 res = pm_runtime_get_sync(musb->controller);
173 if (res < 0) {
174 dev_err(musb->controller, "could not enable: %i", res);
175 glue->powered = false;
176 }
177 } else {
178 pm_runtime_mark_last_busy(musb->controller);
179 pm_runtime_put_autosuspend(musb->controller);
180 }
181}
182
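The new omap2430_set_power() collapses two independent event streams (gadget start/stop and cable plug/unplug) into one derived power state and only crosses the runtime-PM reference boundary on a real transition. A runnable userspace reduction of that bookkeeping; pm_get()/pm_put() are stand-ins for the pm_runtime calls, and the driver additionally warns where this model silently returns:

#include <stdbool.h>
#include <stdio.h>

static bool enabled, cable_connected, powered;

static void pm_get(void) { puts("runtime PM: get"); }
static void pm_put(void) { puts("runtime PM: put (autosuspend)"); }

static void set_power(bool en, bool cable)
{
        bool power_up;

        enabled = en;
        cable_connected = cable;

        power_up = enabled && cable_connected;
        if (power_up == powered)
                return;                 /* duplicate cable event: nothing to do */

        powered = power_up;
        if (power_up)
                pm_get();
        else
                pm_put();
}

int main(void)
{
        set_power(true, false);         /* gadget started, no cable: stays off */
        set_power(true, true);          /* cable plugged: exactly one get      */
        set_power(true, true);          /* duplicate event: ignored            */
        set_power(false, true);         /* gadget stopped: exactly one put     */
        return 0;
}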
183static int omap2430_musb_mailbox(enum musb_vbus_id_status status)
238{ 184{
239 struct omap2430_glue *glue = _glue; 185 struct omap2430_glue *glue = _glue;
240 186
241 if (!glue) { 187 if (!glue) {
242 pr_err("%s: musb core is not yet initialized\n", __func__); 188 pr_err("%s: musb core is not yet initialized\n", __func__);
243 return; 189 return -EPROBE_DEFER;
244 } 190 }
245 glue->status = status; 191 glue->status = status;
246 192
247 if (!glue_to_musb(glue)) { 193 if (!glue_to_musb(glue)) {
248 pr_err("%s: musb core is not yet ready\n", __func__); 194 pr_err("%s: musb core is not yet ready\n", __func__);
249 return; 195 return -EPROBE_DEFER;
250 } 196 }
251 197
252 schedule_work(&glue->omap_musb_mailbox_work); 198 schedule_work(&glue->omap_musb_mailbox_work);
199
200 return 0;
253} 201}
254 202
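Turning omap2430_musb_mailbox() from void into int gives PHY drivers a way to notice that the glue or core is not up yet. The intended caller-side pattern, mirroring the twl6030 hunks later in this patch (twl and linkstat are that driver's fields):

        ret = musb_mailbox(status);
        if (ret)
                twl->linkstat = MUSB_UNKNOWN;   /* not delivered; re-report on a later event */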
255static void omap_musb_set_mailbox(struct omap2430_glue *glue) 203static void omap_musb_set_mailbox(struct omap2430_glue *glue)
@@ -259,6 +207,13 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
259 struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev); 207 struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev);
260 struct omap_musb_board_data *data = pdata->board_data; 208 struct omap_musb_board_data *data = pdata->board_data;
261 struct usb_otg *otg = musb->xceiv->otg; 209 struct usb_otg *otg = musb->xceiv->otg;
210 bool cable_connected;
211
212 cable_connected = ((glue->status == MUSB_ID_GROUND) ||
213 (glue->status == MUSB_VBUS_VALID));
214
215 if (cable_connected)
216 omap2430_set_power(musb, glue->enabled, cable_connected);
262 217
263 switch (glue->status) { 218 switch (glue->status) {
264 case MUSB_ID_GROUND: 219 case MUSB_ID_GROUND:
@@ -268,7 +223,6 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
268 musb->xceiv->otg->state = OTG_STATE_A_IDLE; 223 musb->xceiv->otg->state = OTG_STATE_A_IDLE;
269 musb->xceiv->last_event = USB_EVENT_ID; 224 musb->xceiv->last_event = USB_EVENT_ID;
270 if (musb->gadget_driver) { 225 if (musb->gadget_driver) {
271 pm_runtime_get_sync(dev);
272 omap_control_usb_set_mode(glue->control_otghs, 226 omap_control_usb_set_mode(glue->control_otghs,
273 USB_MODE_HOST); 227 USB_MODE_HOST);
274 omap2430_musb_set_vbus(musb, 1); 228 omap2430_musb_set_vbus(musb, 1);
@@ -281,8 +235,6 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
281 otg->default_a = false; 235 otg->default_a = false;
282 musb->xceiv->otg->state = OTG_STATE_B_IDLE; 236 musb->xceiv->otg->state = OTG_STATE_B_IDLE;
283 musb->xceiv->last_event = USB_EVENT_VBUS; 237 musb->xceiv->last_event = USB_EVENT_VBUS;
284 if (musb->gadget_driver)
285 pm_runtime_get_sync(dev);
286 omap_control_usb_set_mode(glue->control_otghs, USB_MODE_DEVICE); 238 omap_control_usb_set_mode(glue->control_otghs, USB_MODE_DEVICE);
287 break; 239 break;
288 240
@@ -291,11 +243,8 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
291 dev_dbg(dev, "VBUS Disconnect\n"); 243 dev_dbg(dev, "VBUS Disconnect\n");
292 244
293 musb->xceiv->last_event = USB_EVENT_NONE; 245 musb->xceiv->last_event = USB_EVENT_NONE;
294 if (musb->gadget_driver) { 246 if (musb->gadget_driver)
295 omap2430_musb_set_vbus(musb, 0); 247 omap2430_musb_set_vbus(musb, 0);
296 pm_runtime_mark_last_busy(dev);
297 pm_runtime_put_autosuspend(dev);
298 }
299 248
300 if (data->interface_type == MUSB_INTERFACE_UTMI) 249 if (data->interface_type == MUSB_INTERFACE_UTMI)
301 otg_set_vbus(musb->xceiv->otg, 0); 250 otg_set_vbus(musb->xceiv->otg, 0);
@@ -307,6 +256,9 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
307 dev_dbg(dev, "ID float\n"); 256 dev_dbg(dev, "ID float\n");
308 } 257 }
309 258
259 if (!cable_connected)
260 omap2430_set_power(musb, glue->enabled, cable_connected);
261
310 atomic_notifier_call_chain(&musb->xceiv->notifier, 262 atomic_notifier_call_chain(&musb->xceiv->notifier,
311 musb->xceiv->last_event, NULL); 263 musb->xceiv->last_event, NULL);
312} 264}
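Note the bracketing the two omap2430_set_power() calls establish around the event handling: power comes up before a connect event touches MUSB registers, and goes down only after a disconnect event has finished with them. A runnable sketch of that shape (all names here are illustrative):

#include <stdbool.h>
#include <stdio.h>

static void set_power(bool on) { printf("power %s\n", on ? "on" : "off"); }

static void handle_cable_event(bool cable_connected)
{
        if (cable_connected)
                set_power(true);        /* before any register access */

        printf("handle event (device registers touched here)\n");

        if (!cable_connected)
                set_power(false);       /* after all register access  */
}

int main(void)
{
        handle_cable_event(true);       /* connect: power on first    */
        handle_cable_event(false);      /* disconnect: power off last */
        return 0;
}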
@@ -316,13 +268,8 @@ static void omap_musb_mailbox_work(struct work_struct *mailbox_work)
316{ 268{
317 struct omap2430_glue *glue = container_of(mailbox_work, 269 struct omap2430_glue *glue = container_of(mailbox_work,
318 struct omap2430_glue, omap_musb_mailbox_work); 270 struct omap2430_glue, omap_musb_mailbox_work);
319 struct musb *musb = glue_to_musb(glue);
320 struct device *dev = musb->controller;
321 271
322 pm_runtime_get_sync(dev);
323 omap_musb_set_mailbox(glue); 272 omap_musb_set_mailbox(glue);
324 pm_runtime_mark_last_busy(dev);
325 pm_runtime_put_autosuspend(dev);
326} 273}
327 274
328static irqreturn_t omap2430_musb_interrupt(int irq, void *__hci) 275static irqreturn_t omap2430_musb_interrupt(int irq, void *__hci)
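With the pm_runtime calls gone, omap_musb_mailbox_work() is nothing but the container_of() idiom: recover the glue structure from the embedded work item. A portable, runnable reduction of that idiom; the kernel's container_of() in <linux/kernel.h> adds type checking on top of the same offsetof arithmetic:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };

struct glue {
        int status;
        struct work_struct mailbox_work;        /* embedded, like omap2430_glue */
};

static void mailbox_work_fn(struct work_struct *work)
{
        struct glue *glue = container_of(work, struct glue, mailbox_work);

        printf("glue status = %d\n", glue->status);
}

int main(void)
{
        struct glue g = { .status = 7 };

        mailbox_work_fn(&g.mailbox_work);       /* prints 7 */
        return 0;
}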
@@ -389,23 +336,7 @@ static int omap2430_musb_init(struct musb *musb)
389 return PTR_ERR(musb->phy); 336 return PTR_ERR(musb->phy);
390 } 337 }
391 musb->isr = omap2430_musb_interrupt; 338 musb->isr = omap2430_musb_interrupt;
392 339 phy_init(musb->phy);
393 /*
394 * Enable runtime PM for musb parent (this driver). We can't
395 * do it earlier as struct musb is not yet allocated and we
396 * need to touch the musb registers for runtime PM.
397 */
398 pm_runtime_enable(glue->dev);
399 status = pm_runtime_get_sync(glue->dev);
400 if (status < 0)
401 goto err1;
402
403 status = pm_runtime_get_sync(dev);
404 if (status < 0) {
405 dev_err(dev, "pm_runtime_get_sync FAILED %d\n", status);
406 pm_runtime_put_sync(glue->dev);
407 goto err1;
408 }
409 340
410 l = musb_readl(musb->mregs, OTG_INTERFSEL); 341 l = musb_readl(musb->mregs, OTG_INTERFSEL);
411 342
@@ -427,20 +358,10 @@ static int omap2430_musb_init(struct musb *musb)
427 musb_readl(musb->mregs, OTG_INTERFSEL), 358 musb_readl(musb->mregs, OTG_INTERFSEL),
428 musb_readl(musb->mregs, OTG_SIMENABLE)); 359 musb_readl(musb->mregs, OTG_SIMENABLE));
429 360
430 setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
431
432 if (glue->status != MUSB_UNKNOWN) 361 if (glue->status != MUSB_UNKNOWN)
433 omap_musb_set_mailbox(glue); 362 omap_musb_set_mailbox(glue);
434 363
435 phy_init(musb->phy);
436 phy_power_on(musb->phy);
437
438 pm_runtime_put_noidle(musb->controller);
439 pm_runtime_put_noidle(glue->dev);
440 return 0; 364 return 0;
441
442err1:
443 return status;
444} 365}
445 366
446static void omap2430_musb_enable(struct musb *musb) 367static void omap2430_musb_enable(struct musb *musb)
@@ -452,6 +373,11 @@ static void omap2430_musb_enable(struct musb *musb)
452 struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev); 373 struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev);
453 struct omap_musb_board_data *data = pdata->board_data; 374 struct omap_musb_board_data *data = pdata->board_data;
454 375
376 if (!WARN_ON(!musb->phy))
377 phy_power_on(musb->phy);
378
379 omap2430_set_power(musb, true, glue->cable_connected);
380
455 switch (glue->status) { 381 switch (glue->status) {
456 382
457 case MUSB_ID_GROUND: 383 case MUSB_ID_GROUND:
@@ -487,18 +413,25 @@ static void omap2430_musb_disable(struct musb *musb)
487 struct device *dev = musb->controller; 413 struct device *dev = musb->controller;
488 struct omap2430_glue *glue = dev_get_drvdata(dev->parent); 414 struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
489 415
416 if (!WARN_ON(!musb->phy))
417 phy_power_off(musb->phy);
418
490 if (glue->status != MUSB_UNKNOWN) 419 if (glue->status != MUSB_UNKNOWN)
491 omap_control_usb_set_mode(glue->control_otghs, 420 omap_control_usb_set_mode(glue->control_otghs,
492 USB_MODE_DISCONNECT); 421 USB_MODE_DISCONNECT);
422
423 omap2430_set_power(musb, false, glue->cable_connected);
493} 424}
494 425
495static int omap2430_musb_exit(struct musb *musb) 426static int omap2430_musb_exit(struct musb *musb)
496{ 427{
497 del_timer_sync(&musb_idle_timer); 428 struct device *dev = musb->controller;
429 struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
498 430
499 omap2430_low_level_exit(musb); 431 omap2430_low_level_exit(musb);
500 phy_power_off(musb->phy);
501 phy_exit(musb->phy); 432 phy_exit(musb->phy);
433 musb->phy = NULL;
434 cancel_work_sync(&glue->omap_musb_mailbox_work);
502 435
503 return 0; 436 return 0;
504} 437}
@@ -512,9 +445,6 @@ static const struct musb_platform_ops omap2430_ops = {
512 .init = omap2430_musb_init, 445 .init = omap2430_musb_init,
513 .exit = omap2430_musb_exit, 446 .exit = omap2430_musb_exit,
514 447
515 .set_mode = omap2430_musb_set_mode,
516 .try_idle = omap2430_musb_try_idle,
517
518 .set_vbus = omap2430_musb_set_vbus, 448 .set_vbus = omap2430_musb_set_vbus,
519 449
520 .enable = omap2430_musb_enable, 450 .enable = omap2430_musb_enable,
@@ -639,11 +569,9 @@ static int omap2430_probe(struct platform_device *pdev)
639 goto err2; 569 goto err2;
640 } 570 }
641 571
642 /* 572 pm_runtime_enable(glue->dev);
643 * Note that we cannot enable PM runtime yet for this 573 pm_runtime_use_autosuspend(glue->dev);
644 * driver as we need struct musb initialized first. 574 pm_runtime_set_autosuspend_delay(glue->dev, 500);
645 * See omap2430_musb_init above.
646 */
647 575
648 ret = platform_device_add(musb); 576 ret = platform_device_add(musb);
649 if (ret) { 577 if (ret) {
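With struct musb no longer needed before runtime PM can work, probe sets up autosuspend directly; the 500 ms delay is this patch's choice. A minimal sketch of how the pieces connect, using the stock helpers from <linux/pm_runtime.h>:

        pm_runtime_enable(glue->dev);                     /* allow runtime PM      */
        pm_runtime_use_autosuspend(glue->dev);            /* opt in to autosuspend */
        pm_runtime_set_autosuspend_delay(glue->dev, 500); /* idle 500 ms, then sleep */

        /* later, omap2430_set_power() pairs each wakeup with: */
        pm_runtime_mark_last_busy(musb->controller);
        pm_runtime_put_autosuspend(musb->controller);     /* drops the reference;
                                                             suspend fires 500 ms
                                                             after last_busy      */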
@@ -662,12 +590,14 @@ err0:
662 590
663static int omap2430_remove(struct platform_device *pdev) 591static int omap2430_remove(struct platform_device *pdev)
664{ 592{
665 struct omap2430_glue *glue = platform_get_drvdata(pdev); 593 struct omap2430_glue *glue = platform_get_drvdata(pdev);
594 struct musb *musb = glue_to_musb(glue);
666 595
667 pm_runtime_get_sync(glue->dev); 596 pm_runtime_get_sync(glue->dev);
668 cancel_work_sync(&glue->omap_musb_mailbox_work);
669 platform_device_unregister(glue->musb); 597 platform_device_unregister(glue->musb);
598 omap2430_set_power(musb, false, false);
670 pm_runtime_put_sync(glue->dev); 599 pm_runtime_put_sync(glue->dev);
600 pm_runtime_dont_use_autosuspend(glue->dev);
671 pm_runtime_disable(glue->dev); 601 pm_runtime_disable(glue->dev);
672 602
673 return 0; 603 return 0;
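The remove path is worth reading in order: each step undoes one probe-time step, and the glue device is held awake across the unregister so the child can still reach its registers. A sketch of the sequence, annotated under those assumptions:

        pm_runtime_get_sync(glue->dev);             /* wake HW for teardown             */
        platform_device_unregister(glue->musb);     /* child may touch registers here   */
        omap2430_set_power(musb, false, false);     /* drop any latched power reference */
        pm_runtime_put_sync(glue->dev);
        pm_runtime_dont_use_autosuspend(glue->dev); /* undo use_autosuspend()           */
        pm_runtime_disable(glue->dev);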
@@ -680,12 +610,13 @@ static int omap2430_runtime_suspend(struct device *dev)
680 struct omap2430_glue *glue = dev_get_drvdata(dev); 610 struct omap2430_glue *glue = dev_get_drvdata(dev);
681 struct musb *musb = glue_to_musb(glue); 611 struct musb *musb = glue_to_musb(glue);
682 612
683 if (musb) { 613 if (!musb)
684 musb->context.otg_interfsel = musb_readl(musb->mregs, 614 return 0;
685 OTG_INTERFSEL);
686 615
687 omap2430_low_level_exit(musb); 616 musb->context.otg_interfsel = musb_readl(musb->mregs,
688 } 617 OTG_INTERFSEL);
618
619 omap2430_low_level_exit(musb);
689 620
690 return 0; 621 return 0;
691} 622}
@@ -696,7 +627,7 @@ static int omap2430_runtime_resume(struct device *dev)
696 struct musb *musb = glue_to_musb(glue); 627 struct musb *musb = glue_to_musb(glue);
697 628
698 if (!musb) 629 if (!musb)
699 return -EPROBE_DEFER; 630 return 0;
700 631
701 omap2430_low_level_init(musb); 632 omap2430_low_level_init(musb);
702 musb_writel(musb->mregs, OTG_INTERFSEL, 633 musb_writel(musb->mregs, OTG_INTERFSEL,
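The runtime suspend/resume pair preserves OTG_INTERFSEL because the module can lose register state while powered down; note also that a not-yet-probed musb is now a harmless no-op rather than -EPROBE_DEFER, which makes no sense on a resume path. A runnable userspace model of the save/restore shape (register and values are pretend):

#include <stdio.h>

static unsigned int hw_interfsel = 1;   /* pretend register, reset value */
static unsigned int saved_interfsel;

static void rt_suspend(void) { saved_interfsel = hw_interfsel; }  /* save    */
static void rt_resume(void)  { hw_interfsel = saved_interfsel; }  /* restore */

int main(void)
{
        hw_interfsel = 0;               /* value the driver configured    */
        rt_suspend();
        hw_interfsel = 1;               /* context lost while powered off */
        rt_resume();
        printf("interfsel = %u\n", hw_interfsel);   /* 0 again */
        return 0;
}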
@@ -738,18 +669,8 @@ static struct platform_driver omap2430_driver = {
738 }, 669 },
739}; 670};
740 671
672module_platform_driver(omap2430_driver);
673
741MODULE_DESCRIPTION("OMAP2PLUS MUSB Glue Layer"); 674MODULE_DESCRIPTION("OMAP2PLUS MUSB Glue Layer");
742MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>"); 675MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
743MODULE_LICENSE("GPL v2"); 676MODULE_LICENSE("GPL v2");
744
745static int __init omap2430_init(void)
746{
747 return platform_driver_register(&omap2430_driver);
748}
749subsys_initcall(omap2430_init);
750
751static void __exit omap2430_exit(void)
752{
753 platform_driver_unregister(&omap2430_driver);
754}
755module_exit(omap2430_exit);
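The deleted boilerplate is what module_platform_driver() (from <linux/platform_device.h>) generates, with one behavioral difference: the old code registered at subsys_initcall time, while the macro registers at normal module/device init time. Its expansion is equivalent to:

static int __init omap2430_driver_init(void)
{
        return platform_driver_register(&omap2430_driver);
}
module_init(omap2430_driver_init);

static void __exit omap2430_driver_exit(void)
{
        platform_driver_unregister(&omap2430_driver);
}
module_exit(omap2430_driver_exit);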
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c
index fdab4232cfbf..76500515dd8b 100644
--- a/drivers/usb/musb/sunxi.c
+++ b/drivers/usb/musb/sunxi.c
@@ -80,7 +80,8 @@ static struct musb *sunxi_musb;
80 80
81struct sunxi_glue { 81struct sunxi_glue {
82 struct device *dev; 82 struct device *dev;
83 struct platform_device *musb; 83 struct musb *musb;
84 struct platform_device *musb_pdev;
84 struct clk *clk; 85 struct clk *clk;
85 struct reset_control *rst; 86 struct reset_control *rst;
86 struct phy *phy; 87 struct phy *phy;
@@ -102,7 +103,7 @@ static void sunxi_musb_work(struct work_struct *work)
102 return; 103 return;
103 104
104 if (test_and_clear_bit(SUNXI_MUSB_FL_HOSTMODE_PEND, &glue->flags)) { 105 if (test_and_clear_bit(SUNXI_MUSB_FL_HOSTMODE_PEND, &glue->flags)) {
105 struct musb *musb = platform_get_drvdata(glue->musb); 106 struct musb *musb = glue->musb;
106 unsigned long flags; 107 unsigned long flags;
107 u8 devctl; 108 u8 devctl;
108 109
@@ -112,7 +113,7 @@ static void sunxi_musb_work(struct work_struct *work)
112 if (test_bit(SUNXI_MUSB_FL_HOSTMODE, &glue->flags)) { 113 if (test_bit(SUNXI_MUSB_FL_HOSTMODE, &glue->flags)) {
113 set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags); 114 set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
114 musb->xceiv->otg->default_a = 1; 115 musb->xceiv->otg->default_a = 1;
115 musb->xceiv->otg->state = OTG_STATE_A_IDLE; 116 musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
116 MUSB_HST_MODE(musb); 117 MUSB_HST_MODE(musb);
117 devctl |= MUSB_DEVCTL_SESSION; 118 devctl |= MUSB_DEVCTL_SESSION;
118 } else { 119 } else {
@@ -145,10 +146,12 @@ static void sunxi_musb_set_vbus(struct musb *musb, int is_on)
145{ 146{
146 struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent); 147 struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
147 148
148 if (is_on) 149 if (is_on) {
149 set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags); 150 set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
150 else 151 musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
152 } else {
151 clear_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags); 153 clear_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
154 }
152 155
153 schedule_work(&glue->work); 156 schedule_work(&glue->work);
154} 157}
@@ -264,15 +267,6 @@ static int sunxi_musb_init(struct musb *musb)
264 if (ret) 267 if (ret)
265 goto error_unregister_notifier; 268 goto error_unregister_notifier;
266 269
267 if (musb->port_mode == MUSB_PORT_MODE_HOST) {
268 ret = phy_power_on(glue->phy);
269 if (ret)
270 goto error_phy_exit;
271 set_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags);
272 /* Stop musb work from turning vbus off again */
273 set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
274 }
275
276 musb->isr = sunxi_musb_interrupt; 270 musb->isr = sunxi_musb_interrupt;
277 271
278 /* Stop the musb-core from doing runtime pm (not supported on sunxi) */ 272 /* Stop the musb-core from doing runtime pm (not supported on sunxi) */
@@ -280,8 +274,6 @@ static int sunxi_musb_init(struct musb *musb)
280 274
281 return 0; 275 return 0;
282 276
283error_phy_exit:
284 phy_exit(glue->phy);
285error_unregister_notifier: 277error_unregister_notifier:
286 if (musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE) 278 if (musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
287 extcon_unregister_notifier(glue->extcon, EXTCON_USB_HOST, 279 extcon_unregister_notifier(glue->extcon, EXTCON_USB_HOST,
@@ -323,10 +315,31 @@ static int sunxi_musb_exit(struct musb *musb)
323 return 0; 315 return 0;
324} 316}
325 317
318static int sunxi_set_mode(struct musb *musb, u8 mode)
319{
320 struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
321 int ret;
322
323 if (mode == MUSB_HOST) {
324 ret = phy_power_on(glue->phy);
325 if (ret)
326 return ret;
327
328 set_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags);
329 /* Stop musb work from turning vbus off again */
330 set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
331 musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
332 }
333
334 return 0;
335}
336
326static void sunxi_musb_enable(struct musb *musb) 337static void sunxi_musb_enable(struct musb *musb)
327{ 338{
328 struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent); 339 struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
329 340
341 glue->musb = musb;
342
330 /* musb_core does not call us in a balanced manner */ 343 /* musb_core does not call us in a balanced manner */
331 if (test_and_set_bit(SUNXI_MUSB_FL_ENABLED, &glue->flags)) 344 if (test_and_set_bit(SUNXI_MUSB_FL_ENABLED, &glue->flags))
332 return; 345 return;
@@ -569,6 +582,7 @@ static const struct musb_platform_ops sunxi_musb_ops = {
569 .exit = sunxi_musb_exit, 582 .exit = sunxi_musb_exit,
570 .enable = sunxi_musb_enable, 583 .enable = sunxi_musb_enable,
571 .disable = sunxi_musb_disable, 584 .disable = sunxi_musb_disable,
585 .set_mode = sunxi_set_mode,
572 .fifo_offset = sunxi_musb_fifo_offset, 586 .fifo_offset = sunxi_musb_fifo_offset,
573 .ep_offset = sunxi_musb_ep_offset, 587 .ep_offset = sunxi_musb_ep_offset,
574 .busctl_offset = sunxi_musb_busctl_offset, 588 .busctl_offset = sunxi_musb_busctl_offset,
@@ -721,9 +735,9 @@ static int sunxi_musb_probe(struct platform_device *pdev)
721 pinfo.data = &pdata; 735 pinfo.data = &pdata;
722 pinfo.size_data = sizeof(pdata); 736 pinfo.size_data = sizeof(pdata);
723 737
724 glue->musb = platform_device_register_full(&pinfo); 738 glue->musb_pdev = platform_device_register_full(&pinfo);
725 if (IS_ERR(glue->musb)) { 739 if (IS_ERR(glue->musb_pdev)) {
726 ret = PTR_ERR(glue->musb); 740 ret = PTR_ERR(glue->musb_pdev);
727 dev_err(&pdev->dev, "Error registering musb dev: %d\n", ret); 741 dev_err(&pdev->dev, "Error registering musb dev: %d\n", ret);
728 goto err_unregister_usb_phy; 742 goto err_unregister_usb_phy;
729 } 743 }
@@ -740,7 +754,7 @@ static int sunxi_musb_remove(struct platform_device *pdev)
740 struct sunxi_glue *glue = platform_get_drvdata(pdev); 754 struct sunxi_glue *glue = platform_get_drvdata(pdev);
741 struct platform_device *usb_phy = glue->usb_phy; 755 struct platform_device *usb_phy = glue->usb_phy;
742 756
743 platform_device_unregister(glue->musb); /* Frees glue ! */ 757 platform_device_unregister(glue->musb_pdev);
744 usb_phy_generic_unregister(usb_phy); 758 usb_phy_generic_unregister(usb_phy);
745 759
746 return 0; 760 return 0;
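The sunxi change splits what used to be one overloaded pointer: glue->musb_pdev is the platform device that probe registered and remove must unregister, while glue->musb is the core's own state, cached in sunxi_musb_enable() once the core actually exists, so sunxi_musb_work() no longer reaches through platform_get_drvdata() on a device whose drvdata it does not own. The resulting shape, abridged:

struct sunxi_glue {
        struct device *dev;
        struct musb *musb;                  /* cached in sunxi_musb_enable()       */
        struct platform_device *musb_pdev;  /* unregistered in sunxi_musb_remove() */
        /* clk, rst, phy, usb_phy, extcon, flags, work as before */
};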
diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c
index 24e2b3cf1867..a72e8d670adc 100644
--- a/drivers/usb/phy/phy-twl6030-usb.c
+++ b/drivers/usb/phy/phy-twl6030-usb.c
@@ -97,6 +97,9 @@ struct twl6030_usb {
97 97
98 struct regulator *usb3v3; 98 struct regulator *usb3v3;
99 99
100 /* used to check initial cable status after probe */
101 struct delayed_work get_status_work;
102
100 /* used to set vbus, in atomic path */ 103 /* used to set vbus, in atomic path */
101 struct work_struct set_vbus_work; 104 struct work_struct set_vbus_work;
102 105
@@ -227,12 +230,16 @@ static irqreturn_t twl6030_usb_irq(int irq, void *_twl)
227 twl->asleep = 1; 230 twl->asleep = 1;
228 status = MUSB_VBUS_VALID; 231 status = MUSB_VBUS_VALID;
229 twl->linkstat = status; 232 twl->linkstat = status;
230 musb_mailbox(status); 233 ret = musb_mailbox(status);
234 if (ret)
235 twl->linkstat = MUSB_UNKNOWN;
231 } else { 236 } else {
232 if (twl->linkstat != MUSB_UNKNOWN) { 237 if (twl->linkstat != MUSB_UNKNOWN) {
233 status = MUSB_VBUS_OFF; 238 status = MUSB_VBUS_OFF;
234 twl->linkstat = status; 239 twl->linkstat = status;
235 musb_mailbox(status); 240 ret = musb_mailbox(status);
241 if (ret)
242 twl->linkstat = MUSB_UNKNOWN;
236 if (twl->asleep) { 243 if (twl->asleep) {
237 regulator_disable(twl->usb3v3); 244 regulator_disable(twl->usb3v3);
238 twl->asleep = 0; 245 twl->asleep = 0;
@@ -264,7 +271,9 @@ static irqreturn_t twl6030_usbotg_irq(int irq, void *_twl)
264 twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_SET); 271 twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_SET);
265 status = MUSB_ID_GROUND; 272 status = MUSB_ID_GROUND;
266 twl->linkstat = status; 273 twl->linkstat = status;
267 musb_mailbox(status); 274 ret = musb_mailbox(status);
275 if (ret)
276 twl->linkstat = MUSB_UNKNOWN;
268 } else { 277 } else {
269 twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_CLR); 278 twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_CLR);
270 twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET); 279 twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET);
@@ -274,6 +283,15 @@ static irqreturn_t twl6030_usbotg_irq(int irq, void *_twl)
274 return IRQ_HANDLED; 283 return IRQ_HANDLED;
275} 284}
276 285
286static void twl6030_status_work(struct work_struct *work)
287{
288 struct twl6030_usb *twl = container_of(work, struct twl6030_usb,
289 get_status_work.work);
290
291 twl6030_usb_irq(twl->irq2, twl);
292 twl6030_usbotg_irq(twl->irq1, twl);
293}
294
277static int twl6030_enable_irq(struct twl6030_usb *twl) 295static int twl6030_enable_irq(struct twl6030_usb *twl)
278{ 296{
279 twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET); 297 twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET);
@@ -284,8 +302,6 @@ static int twl6030_enable_irq(struct twl6030_usb *twl)
284 REG_INT_MSK_LINE_C); 302 REG_INT_MSK_LINE_C);
285 twl6030_interrupt_unmask(TWL6030_CHARGER_CTRL_INT_MASK, 303 twl6030_interrupt_unmask(TWL6030_CHARGER_CTRL_INT_MASK,
286 REG_INT_MSK_STS_C); 304 REG_INT_MSK_STS_C);
287 twl6030_usb_irq(twl->irq2, twl);
288 twl6030_usbotg_irq(twl->irq1, twl);
289 305
290 return 0; 306 return 0;
291} 307}
@@ -371,6 +387,7 @@ static int twl6030_usb_probe(struct platform_device *pdev)
371 dev_warn(&pdev->dev, "could not create sysfs file\n"); 387 dev_warn(&pdev->dev, "could not create sysfs file\n");
372 388
373 INIT_WORK(&twl->set_vbus_work, otg_set_vbus_work); 389 INIT_WORK(&twl->set_vbus_work, otg_set_vbus_work);
390 INIT_DELAYED_WORK(&twl->get_status_work, twl6030_status_work);
374 391
375 status = request_threaded_irq(twl->irq1, NULL, twl6030_usbotg_irq, 392 status = request_threaded_irq(twl->irq1, NULL, twl6030_usbotg_irq,
376 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | IRQF_ONESHOT, 393 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | IRQF_ONESHOT,
@@ -395,6 +412,7 @@ static int twl6030_usb_probe(struct platform_device *pdev)
395 412
396 twl->asleep = 0; 413 twl->asleep = 0;
397 twl6030_enable_irq(twl); 414 twl6030_enable_irq(twl);
415 schedule_delayed_work(&twl->get_status_work, HZ);
398 dev_info(&pdev->dev, "Initialized TWL6030 USB module\n"); 416 dev_info(&pdev->dev, "Initialized TWL6030 USB module\n");
399 417
400 return 0; 418 return 0;
@@ -404,6 +422,7 @@ static int twl6030_usb_remove(struct platform_device *pdev)
404{ 422{
405 struct twl6030_usb *twl = platform_get_drvdata(pdev); 423 struct twl6030_usb *twl = platform_get_drvdata(pdev);
406 424
425 cancel_delayed_work(&twl->get_status_work);
407 twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK, 426 twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK,
408 REG_INT_MSK_LINE_C); 427 REG_INT_MSK_LINE_C);
409 twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK, 428 twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK,
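The twl6030 side stops reporting cable status synchronously from twl6030_enable_irq(), where musb_mailbox() could still fail with -EPROBE_DEFER, and instead polls the initial state from a delayed work item one second after probe, cancelling it on remove. The calls are the standard workqueue API from <linux/workqueue.h>; abridged:

        INIT_DELAYED_WORK(&twl->get_status_work, twl6030_status_work);  /* probe  */
        schedule_delayed_work(&twl->get_status_work, HZ);               /* ~1 s   */
        /* ... */
        cancel_delayed_work(&twl->get_status_work);                     /* remove */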
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 2eddbe538cda..5608af4a369d 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -2007,6 +2007,7 @@ static void mos7720_release(struct usb_serial *serial)
2007 urblist_entry) 2007 urblist_entry)
2008 usb_unlink_urb(urbtrack->urb); 2008 usb_unlink_urb(urbtrack->urb);
2009 spin_unlock_irqrestore(&mos_parport->listlock, flags); 2009 spin_unlock_irqrestore(&mos_parport->listlock, flags);
2010 parport_del_port(mos_parport->pp);
2010 2011
2011 kref_put(&mos_parport->ref_count, destroy_mos_parport); 2012 kref_put(&mos_parport->ref_count, destroy_mos_parport);
2012 } 2013 }
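The mos7720 fix is an unpublish-before-free ordering: deleting the parport before the final kref_put() means no new user can look the port up while destroy_mos_parport(), the driver's kref release callback, may be tearing it down. The general shape:

        parport_del_port(mos_parport->pp);                      /* no new lookups  */
        kref_put(&mos_parport->ref_count, destroy_mos_parport); /* may free it now */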
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 4d49fce406e1..5ef014ba6ae8 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -836,6 +836,7 @@ static int uas_slave_configure(struct scsi_device *sdev)
836 if (devinfo->flags & US_FL_BROKEN_FUA) 836 if (devinfo->flags & US_FL_BROKEN_FUA)
837 sdev->broken_fua = 1; 837 sdev->broken_fua = 1;
838 838
839 scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
839 return 0; 840 return 0;
840} 841}
841 842
@@ -848,7 +849,6 @@ static struct scsi_host_template uas_host_template = {
848 .slave_configure = uas_slave_configure, 849 .slave_configure = uas_slave_configure,
849 .eh_abort_handler = uas_eh_abort_handler, 850 .eh_abort_handler = uas_eh_abort_handler,
850 .eh_bus_reset_handler = uas_eh_bus_reset_handler, 851 .eh_bus_reset_handler = uas_eh_bus_reset_handler,
851 .can_queue = MAX_CMNDS,
852 .this_id = -1, 852 .this_id = -1,
853 .sg_tablesize = SG_NONE, 853 .sg_tablesize = SG_NONE,
854 .skip_settle_delay = 1, 854 .skip_settle_delay = 1,
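For uas, a fixed host-wide can_queue could oversubscribe devices with fewer command streams; sizing the queue per LUN from what the device reported, minus a small reserve, matches what the hardware can actually take. scsi_change_queue_depth() is the standard SCSI midlayer helper, used here as the hunk above shows:

static int uas_slave_configure(struct scsi_device *sdev)
{
        /* ... existing flag handling ... */
        scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
        return 0;
}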
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index fca51105974e..2e0450bec1b1 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -941,7 +941,7 @@ static void vhci_stop(struct usb_hcd *hcd)
941 941
942static int vhci_get_frame_number(struct usb_hcd *hcd) 942static int vhci_get_frame_number(struct usb_hcd *hcd)
943{ 943{
944 pr_err("Not yet implemented\n"); 944 dev_err_ratelimited(&hcd->self.root_hub->dev, "Not yet implemented\n");
945 return 0; 945 return 0;
946} 946}
947 947
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 93601407dab8..688691d9058d 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -749,7 +749,8 @@ static int vfio_vpd_config_write(struct vfio_pci_device *vdev, int pos,
749 if (pci_write_vpd(pdev, addr & ~PCI_VPD_ADDR_F, 4, &data) != 4) 749 if (pci_write_vpd(pdev, addr & ~PCI_VPD_ADDR_F, 4, &data) != 4)
750 return count; 750 return count;
751 } else { 751 } else {
752 if (pci_read_vpd(pdev, addr, 4, &data) != 4) 752 data = 0;
753 if (pci_read_vpd(pdev, addr, 4, &data) < 0)
753 return count; 754 return count;
754 *pdata = cpu_to_le32(data); 755 *pdata = cpu_to_le32(data);
755 } 756 }
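pci_read_vpd() returns the number of bytes read or a negative errno, so comparing against 4 also rejected legitimate short reads; checking for < 0 and pre-zeroing the buffer accepts them while keeping a partial read from leaking stale stack bytes into the value the guest sees. The fixed read path in isolation:

        u32 data = 0;                   /* pre-zero: a short read stays clean */

        if (pci_read_vpd(pdev, addr, 4, &data) < 0)
                return count;           /* leave emulated state unchanged     */
        *pdata = cpu_to_le32(data);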
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index e9ea3fef144a..15ecfc9c5f6c 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -228,9 +228,9 @@ static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd)
228 228
229static void vfio_intx_disable(struct vfio_pci_device *vdev) 229static void vfio_intx_disable(struct vfio_pci_device *vdev)
230{ 230{
231 vfio_intx_set_signal(vdev, -1);
232 vfio_virqfd_disable(&vdev->ctx[0].unmask); 231 vfio_virqfd_disable(&vdev->ctx[0].unmask);
233 vfio_virqfd_disable(&vdev->ctx[0].mask); 232 vfio_virqfd_disable(&vdev->ctx[0].mask);
233 vfio_intx_set_signal(vdev, -1);
234 vdev->irq_type = VFIO_PCI_NUM_IRQS; 234 vdev->irq_type = VFIO_PCI_NUM_IRQS;
235 vdev->num_ctx = 0; 235 vdev->num_ctx = 0;
236 kfree(vdev->ctx); 236 kfree(vdev->ctx);
@@ -401,13 +401,13 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
401 struct pci_dev *pdev = vdev->pdev; 401 struct pci_dev *pdev = vdev->pdev;
402 int i; 402 int i;
403 403
404 vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);
405
406 for (i = 0; i < vdev->num_ctx; i++) { 404 for (i = 0; i < vdev->num_ctx; i++) {
407 vfio_virqfd_disable(&vdev->ctx[i].unmask); 405 vfio_virqfd_disable(&vdev->ctx[i].unmask);
408 vfio_virqfd_disable(&vdev->ctx[i].mask); 406 vfio_virqfd_disable(&vdev->ctx[i].mask);
409 } 407 }
410 408
409 vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);
410
411 if (msix) { 411 if (msix) {
412 pci_disable_msix(vdev->pdev); 412 pci_disable_msix(vdev->pdev);
413 kfree(vdev->msix); 413 kfree(vdev->msix);
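Both vfio reorderings follow the same teardown rule: stop the eventfd-driven unmask/mask handlers first, while the interrupt context they reference is still alive, and only then drop the trigger and free the per-vector state. For the INTx case:

        vfio_virqfd_disable(&vdev->ctx[0].unmask);  /* stop async users first */
        vfio_virqfd_disable(&vdev->ctx[0].mask);
        vfio_intx_set_signal(vdev, -1);             /* then tear down ctx     */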
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 15a65823aad9..2ba19424e4a1 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -515,7 +515,7 @@ static int map_try_harder(struct vfio_domain *domain, dma_addr_t iova,
515 unsigned long pfn, long npage, int prot) 515 unsigned long pfn, long npage, int prot)
516{ 516{
517 long i; 517 long i;
518 int ret; 518 int ret = 0;
519 519
520 for (i = 0; i < npage; i++, pfn++, iova += PAGE_SIZE) { 520 for (i = 0; i < npage; i++, pfn++, iova += PAGE_SIZE) {
521 ret = iommu_map(domain->domain, iova, 521 ret = iommu_map(domain->domain, iova,
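The map_try_harder() change fixes the classic zero-iteration bug: with npage == 0 the loop body never executes and the old uninitialized ret was returned as-is. A runnable reduction:

#include <stdio.h>

static int map_range(long npage)
{
        int ret = 0;            /* the fix: defined result for npage == 0 */
        long i;

        for (i = 0; i < npage; i++) {
                ret = 0;        /* stand-in for an iommu_map() call */
                if (ret)
                        break;
        }
        return ret;
}

int main(void)
{
        printf("%d\n", map_range(0));   /* prints 0, not stack garbage */
        return 0;
}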
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c b/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c
index 8511c648a15c..9d78411a3bf7 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c
@@ -14,7 +14,7 @@
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15#include <linux/of.h> 15#include <linux/of.h>
16 16
17#include <video/omapdss.h> 17#include <video/omapfb_dss.h>
18#include <video/omap-panel-data.h> 18#include <video/omap-panel-data.h>
19 19
20struct panel_drv_data { 20struct panel_drv_data {
@@ -25,7 +25,6 @@ struct panel_drv_data {
25 25
26 struct omap_video_timings timings; 26 struct omap_video_timings timings;
27 27
28 enum omap_dss_venc_type connector_type;
29 bool invert_polarity; 28 bool invert_polarity;
30}; 29};
31 30
@@ -45,10 +44,6 @@ static const struct omap_video_timings tvc_pal_timings = {
45 44
46static const struct of_device_id tvc_of_match[]; 45static const struct of_device_id tvc_of_match[];
47 46
48struct tvc_of_data {
49 enum omap_dss_venc_type connector_type;
50};
51
52#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) 47#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
53 48
54static int tvc_connect(struct omap_dss_device *dssdev) 49static int tvc_connect(struct omap_dss_device *dssdev)
@@ -99,7 +94,7 @@ static int tvc_enable(struct omap_dss_device *dssdev)
99 in->ops.atv->set_timings(in, &ddata->timings); 94 in->ops.atv->set_timings(in, &ddata->timings);
100 95
101 if (!ddata->dev->of_node) { 96 if (!ddata->dev->of_node) {
102 in->ops.atv->set_type(in, ddata->connector_type); 97 in->ops.atv->set_type(in, OMAP_DSS_VENC_TYPE_COMPOSITE);
103 98
104 in->ops.atv->invert_vid_out_polarity(in, 99 in->ops.atv->invert_vid_out_polarity(in,
105 ddata->invert_polarity); 100 ddata->invert_polarity);
@@ -207,7 +202,6 @@ static int tvc_probe_pdata(struct platform_device *pdev)
207 202
208 ddata->in = in; 203 ddata->in = in;
209 204
210 ddata->connector_type = pdata->connector_type;
211 ddata->invert_polarity = pdata->invert_polarity; 205 ddata->invert_polarity = pdata->invert_polarity;
212 206
213 dssdev = &ddata->dssdev; 207 dssdev = &ddata->dssdev;
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c b/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c
index d811e6dcaef7..06e1db34541e 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c
@@ -16,8 +16,7 @@
16 16
17#include <drm/drm_edid.h> 17#include <drm/drm_edid.h>
18 18
19#include <video/omapdss.h> 19#include <video/omapfb_dss.h>
20#include <video/omap-panel-data.h>
21 20
22static const struct omap_video_timings dvic_default_timings = { 21static const struct omap_video_timings dvic_default_timings = {
23 .x_res = 640, 22 .x_res = 640,
@@ -236,46 +235,6 @@ static struct omap_dss_driver dvic_driver = {
236 .detect = dvic_detect, 235 .detect = dvic_detect,
237}; 236};
238 237
239static int dvic_probe_pdata(struct platform_device *pdev)
240{
241 struct panel_drv_data *ddata = platform_get_drvdata(pdev);
242 struct connector_dvi_platform_data *pdata;
243 struct omap_dss_device *in, *dssdev;
244 int i2c_bus_num;
245
246 pdata = dev_get_platdata(&pdev->dev);
247 i2c_bus_num = pdata->i2c_bus_num;
248
249 if (i2c_bus_num != -1) {
250 struct i2c_adapter *adapter;
251
252 adapter = i2c_get_adapter(i2c_bus_num);
253 if (!adapter) {
254 dev_err(&pdev->dev,
255 "Failed to get I2C adapter, bus %d\n",
256 i2c_bus_num);
257 return -EPROBE_DEFER;
258 }
259
260 ddata->i2c_adapter = adapter;
261 }
262
263 in = omap_dss_find_output(pdata->source);
264 if (in == NULL) {
265 i2c_put_adapter(ddata->i2c_adapter);
266
267 dev_err(&pdev->dev, "Failed to find video source\n");
268 return -EPROBE_DEFER;
269 }
270
271 ddata->in = in;
272
273 dssdev = &ddata->dssdev;
274 dssdev->name = pdata->name;
275
276 return 0;
277}
278
279static int dvic_probe_of(struct platform_device *pdev) 238static int dvic_probe_of(struct platform_device *pdev)
280{ 239{
281 struct panel_drv_data *ddata = platform_get_drvdata(pdev); 240 struct panel_drv_data *ddata = platform_get_drvdata(pdev);
@@ -313,23 +272,18 @@ static int dvic_probe(struct platform_device *pdev)
313 struct omap_dss_device *dssdev; 272 struct omap_dss_device *dssdev;
314 int r; 273 int r;
315 274
275 if (!pdev->dev.of_node)
276 return -ENODEV;
277
316 ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); 278 ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
317 if (!ddata) 279 if (!ddata)
318 return -ENOMEM; 280 return -ENOMEM;
319 281
320 platform_set_drvdata(pdev, ddata); 282 platform_set_drvdata(pdev, ddata);
321 283
322 if (dev_get_platdata(&pdev->dev)) { 284 r = dvic_probe_of(pdev);
323 r = dvic_probe_pdata(pdev); 285 if (r)
324 if (r) 286 return r;
325 return r;
326 } else if (pdev->dev.of_node) {
327 r = dvic_probe_of(pdev);
328 if (r)
329 return r;
330 } else {
331 return -ENODEV;
332 }
333 287
334 ddata->timings = dvic_default_timings; 288 ddata->timings = dvic_default_timings;
335 289
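From here to the end of the omapfb section the same conversion repeats for every panel and encoder (hdmic, tfp410, dsicm, lb035q02, nec_8048, sharp_ls, td028ttec1, tpo_td043): the legacy platform-data path and its probe_pdata helper are deleted, and probe rejects non-DT instantiation up front. The resulting pattern, shown once with illustrative names:

static int example_probe(struct platform_device *pdev)
{
        int r;

        if (!pdev->dev.of_node)
                return -ENODEV;         /* DT-only from now on        */

        /* ... allocate and register ddata ... */

        r = example_probe_of(pdev);     /* was a pdata/of/else ladder */
        if (r)
                return r;

        return 0;
}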
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c b/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c
index 6ee4129bc0c0..58d5803ede67 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c
@@ -17,8 +17,7 @@
17 17
18#include <drm/drm_edid.h> 18#include <drm/drm_edid.h>
19 19
20#include <video/omapdss.h> 20#include <video/omapfb_dss.h>
21#include <video/omap-panel-data.h>
22 21
23static const struct omap_video_timings hdmic_default_timings = { 22static const struct omap_video_timings hdmic_default_timings = {
24 .x_res = 640, 23 .x_res = 640,
@@ -206,30 +205,6 @@ static struct omap_dss_driver hdmic_driver = {
206 .set_hdmi_infoframe = hdmic_set_infoframe, 205 .set_hdmi_infoframe = hdmic_set_infoframe,
207}; 206};
208 207
209static int hdmic_probe_pdata(struct platform_device *pdev)
210{
211 struct panel_drv_data *ddata = platform_get_drvdata(pdev);
212 struct connector_hdmi_platform_data *pdata;
213 struct omap_dss_device *in, *dssdev;
214
215 pdata = dev_get_platdata(&pdev->dev);
216
217 ddata->hpd_gpio = -ENODEV;
218
219 in = omap_dss_find_output(pdata->source);
220 if (in == NULL) {
221 dev_err(&pdev->dev, "Failed to find video source\n");
222 return -EPROBE_DEFER;
223 }
224
225 ddata->in = in;
226
227 dssdev = &ddata->dssdev;
228 dssdev->name = pdata->name;
229
230 return 0;
231}
232
233static int hdmic_probe_of(struct platform_device *pdev) 208static int hdmic_probe_of(struct platform_device *pdev)
234{ 209{
235 struct panel_drv_data *ddata = platform_get_drvdata(pdev); 210 struct panel_drv_data *ddata = platform_get_drvdata(pdev);
@@ -261,6 +236,9 @@ static int hdmic_probe(struct platform_device *pdev)
261 struct omap_dss_device *dssdev; 236 struct omap_dss_device *dssdev;
262 int r; 237 int r;
263 238
239 if (!pdev->dev.of_node)
240 return -ENODEV;
241
264 ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); 242 ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
265 if (!ddata) 243 if (!ddata)
266 return -ENOMEM; 244 return -ENOMEM;
@@ -268,17 +246,9 @@ static int hdmic_probe(struct platform_device *pdev)
268 platform_set_drvdata(pdev, ddata); 246 platform_set_drvdata(pdev, ddata);
269 ddata->dev = &pdev->dev; 247 ddata->dev = &pdev->dev;
270 248
271 if (dev_get_platdata(&pdev->dev)) { 249 r = hdmic_probe_of(pdev);
272 r = hdmic_probe_pdata(pdev); 250 if (r)
273 if (r) 251 return r;
274 return r;
275 } else if (pdev->dev.of_node) {
276 r = hdmic_probe_of(pdev);
277 if (r)
278 return r;
279 } else {
280 return -ENODEV;
281 }
282 252
283 if (gpio_is_valid(ddata->hpd_gpio)) { 253 if (gpio_is_valid(ddata->hpd_gpio)) {
284 r = devm_gpio_request_one(&pdev->dev, ddata->hpd_gpio, 254 r = devm_gpio_request_one(&pdev->dev, ddata->hpd_gpio,
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c b/drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c
index 8c246c213e06..a9a67167cc3d 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c
@@ -20,7 +20,7 @@
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/of_gpio.h> 21#include <linux/of_gpio.h>
22 22
23#include <video/omapdss.h> 23#include <video/omapfb_dss.h>
24 24
25struct panel_drv_data { 25struct panel_drv_data {
26 struct omap_dss_device dssdev; 26 struct omap_dss_device dssdev;
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c
index d9048b3df495..8c0953d069b7 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c
@@ -15,8 +15,7 @@
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/of_gpio.h> 16#include <linux/of_gpio.h>
17 17
18#include <video/omapdss.h> 18#include <video/omapfb_dss.h>
19#include <video/omap-panel-data.h>
20 19
21struct panel_drv_data { 20struct panel_drv_data {
22 struct omap_dss_device dssdev; 21 struct omap_dss_device dssdev;
@@ -166,32 +165,6 @@ static const struct omapdss_dvi_ops tfp410_dvi_ops = {
166 .get_timings = tfp410_get_timings, 165 .get_timings = tfp410_get_timings,
167}; 166};
168 167
169static int tfp410_probe_pdata(struct platform_device *pdev)
170{
171 struct panel_drv_data *ddata = platform_get_drvdata(pdev);
172 struct encoder_tfp410_platform_data *pdata;
173 struct omap_dss_device *dssdev, *in;
174
175 pdata = dev_get_platdata(&pdev->dev);
176
177 ddata->pd_gpio = pdata->power_down_gpio;
178
179 ddata->data_lines = pdata->data_lines;
180
181 in = omap_dss_find_output(pdata->source);
182 if (in == NULL) {
183 dev_err(&pdev->dev, "Failed to find video source\n");
184 return -ENODEV;
185 }
186
187 ddata->in = in;
188
189 dssdev = &ddata->dssdev;
190 dssdev->name = pdata->name;
191
192 return 0;
193}
194
195static int tfp410_probe_of(struct platform_device *pdev) 168static int tfp410_probe_of(struct platform_device *pdev)
196{ 169{
197 struct panel_drv_data *ddata = platform_get_drvdata(pdev); 170 struct panel_drv_data *ddata = platform_get_drvdata(pdev);
@@ -225,23 +198,18 @@ static int tfp410_probe(struct platform_device *pdev)
225 struct omap_dss_device *dssdev; 198 struct omap_dss_device *dssdev;
226 int r; 199 int r;
227 200
201 if (!pdev->dev.of_node)
202 return -ENODEV;
203
228 ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); 204 ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
229 if (!ddata) 205 if (!ddata)
230 return -ENOMEM; 206 return -ENOMEM;
231 207
232 platform_set_drvdata(pdev, ddata); 208 platform_set_drvdata(pdev, ddata);
233 209
234 if (dev_get_platdata(&pdev->dev)) { 210 r = tfp410_probe_of(pdev);
235 r = tfp410_probe_pdata(pdev); 211 if (r)
236 if (r) 212 return r;
237 return r;
238 } else if (pdev->dev.of_node) {
239 r = tfp410_probe_of(pdev);
240 if (r)
241 return r;
242 } else {
243 return -ENODEV;
244 }
245 213
246 if (gpio_is_valid(ddata->pd_gpio)) { 214 if (gpio_is_valid(ddata->pd_gpio)) {
247 r = devm_gpio_request_one(&pdev->dev, ddata->pd_gpio, 215 r = devm_gpio_request_one(&pdev->dev, ddata->pd_gpio,
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c
index 677e2545fcbe..80dc47347e21 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c
@@ -16,8 +16,7 @@
16#include <linux/platform_device.h> 16#include <linux/platform_device.h>
17#include <linux/gpio/consumer.h> 17#include <linux/gpio/consumer.h>
18 18
19#include <video/omapdss.h> 19#include <video/omapfb_dss.h>
20#include <video/omap-panel-data.h>
21 20
22struct panel_drv_data { 21struct panel_drv_data {
23 struct omap_dss_device dssdev; 22 struct omap_dss_device dssdev;
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c
index e780fd4f8b46..ace3d818afe5 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c
@@ -16,7 +16,7 @@
16#include <linux/of.h> 16#include <linux/of.h>
17#include <linux/of_gpio.h> 17#include <linux/of_gpio.h>
18 18
19#include <video/omapdss.h> 19#include <video/omapfb_dss.h>
20#include <video/omap-panel-data.h> 20#include <video/omap-panel-data.h>
21#include <video/of_display_timing.h> 21#include <video/of_display_timing.h>
22 22
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
index 3414c2609320..b58012b82b6f 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
@@ -25,8 +25,7 @@
25#include <linux/of_device.h> 25#include <linux/of_device.h>
26#include <linux/of_gpio.h> 26#include <linux/of_gpio.h>
27 27
28#include <video/omapdss.h> 28#include <video/omapfb_dss.h>
29#include <video/omap-panel-data.h>
30#include <video/mipi_display.h> 29#include <video/mipi_display.h>
31 30
32/* DSI Virtual channel. Hardcoded for now. */ 31/* DSI Virtual channel. Hardcoded for now. */
@@ -1127,40 +1126,6 @@ static struct omap_dss_driver dsicm_ops = {
1127 .memory_read = dsicm_memory_read, 1126 .memory_read = dsicm_memory_read,
1128}; 1127};
1129 1128
1130static int dsicm_probe_pdata(struct platform_device *pdev)
1131{
1132 const struct panel_dsicm_platform_data *pdata;
1133 struct panel_drv_data *ddata = platform_get_drvdata(pdev);
1134 struct omap_dss_device *dssdev, *in;
1135
1136 pdata = dev_get_platdata(&pdev->dev);
1137
1138 in = omap_dss_find_output(pdata->source);
1139 if (in == NULL) {
1140 dev_err(&pdev->dev, "failed to find video source\n");
1141 return -EPROBE_DEFER;
1142 }
1143 ddata->in = in;
1144
1145 ddata->reset_gpio = pdata->reset_gpio;
1146
1147 if (pdata->use_ext_te)
1148 ddata->ext_te_gpio = pdata->ext_te_gpio;
1149 else
1150 ddata->ext_te_gpio = -1;
1151
1152 ddata->ulps_timeout = pdata->ulps_timeout;
1153
1154 ddata->use_dsi_backlight = pdata->use_dsi_backlight;
1155
1156 ddata->pin_config = pdata->pin_config;
1157
1158 dssdev = &ddata->dssdev;
1159 dssdev->name = pdata->name;
1160
1161 return 0;
1162}
1163
1164static int dsicm_probe_of(struct platform_device *pdev) 1129static int dsicm_probe_of(struct platform_device *pdev)
1165{ 1130{
1166 struct device_node *node = pdev->dev.of_node; 1131 struct device_node *node = pdev->dev.of_node;
@@ -1207,6 +1172,9 @@ static int dsicm_probe(struct platform_device *pdev)
1207 1172
1208 dev_dbg(dev, "probe\n"); 1173 dev_dbg(dev, "probe\n");
1209 1174
1175 if (!pdev->dev.of_node)
1176 return -ENODEV;
1177
1210 ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL); 1178 ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
1211 if (!ddata) 1179 if (!ddata)
1212 return -ENOMEM; 1180 return -ENOMEM;
@@ -1214,17 +1182,9 @@ static int dsicm_probe(struct platform_device *pdev)
1214 platform_set_drvdata(pdev, ddata); 1182 platform_set_drvdata(pdev, ddata);
1215 ddata->pdev = pdev; 1183 ddata->pdev = pdev;
1216 1184
1217 if (dev_get_platdata(dev)) { 1185 r = dsicm_probe_of(pdev);
1218 r = dsicm_probe_pdata(pdev); 1186 if (r)
1219 if (r) 1187 return r;
1220 return r;
1221 } else if (pdev->dev.of_node) {
1222 r = dsicm_probe_of(pdev);
1223 if (r)
1224 return r;
1225 } else {
1226 return -ENODEV;
1227 }
1228 1188
1229 ddata->timings.x_res = 864; 1189 ddata->timings.x_res = 864;
1230 ddata->timings.y_res = 480; 1190 ddata->timings.y_res = 480;
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c
index 18eb60e9c9ec..f14691ce8d02 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c
@@ -16,8 +16,7 @@
16#include <linux/mutex.h> 16#include <linux/mutex.h>
17#include <linux/gpio.h> 17#include <linux/gpio.h>
18 18
19#include <video/omapdss.h> 19#include <video/omapfb_dss.h>
20#include <video/omap-panel-data.h>
21 20
22static struct omap_video_timings lb035q02_timings = { 21static struct omap_video_timings lb035q02_timings = {
23 .x_res = 320, 22 .x_res = 320,
@@ -240,44 +239,6 @@ static struct omap_dss_driver lb035q02_ops = {
240 .get_resolution = omapdss_default_get_resolution, 239 .get_resolution = omapdss_default_get_resolution,
241}; 240};
242 241
243static int lb035q02_probe_pdata(struct spi_device *spi)
244{
245 const struct panel_lb035q02_platform_data *pdata;
246 struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
247 struct omap_dss_device *dssdev, *in;
248 int r;
249
250 pdata = dev_get_platdata(&spi->dev);
251
252 in = omap_dss_find_output(pdata->source);
253 if (in == NULL) {
254 dev_err(&spi->dev, "failed to find video source '%s'\n",
255 pdata->source);
256 return -EPROBE_DEFER;
257 }
258
259 ddata->in = in;
260
261 ddata->data_lines = pdata->data_lines;
262
263 dssdev = &ddata->dssdev;
264 dssdev->name = pdata->name;
265
266 r = devm_gpio_request_one(&spi->dev, pdata->enable_gpio,
267 GPIOF_OUT_INIT_LOW, "panel enable");
268 if (r)
269 goto err_gpio;
270
271 ddata->enable_gpio = gpio_to_desc(pdata->enable_gpio);
272
273 ddata->backlight_gpio = pdata->backlight_gpio;
274
275 return 0;
276err_gpio:
277 omap_dss_put_device(ddata->in);
278 return r;
279}
280
281static int lb035q02_probe_of(struct spi_device *spi) 242static int lb035q02_probe_of(struct spi_device *spi)
282{ 243{
283 struct device_node *node = spi->dev.of_node; 244 struct device_node *node = spi->dev.of_node;
@@ -312,6 +273,9 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
312 struct omap_dss_device *dssdev; 273 struct omap_dss_device *dssdev;
313 int r; 274 int r;
314 275
276 if (!spi->dev.of_node)
277 return -ENODEV;
278
315 ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL); 279 ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL);
316 if (ddata == NULL) 280 if (ddata == NULL)
317 return -ENOMEM; 281 return -ENOMEM;
@@ -320,17 +284,9 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
320 284
321 ddata->spi = spi; 285 ddata->spi = spi;
322 286
323 if (dev_get_platdata(&spi->dev)) { 287 r = lb035q02_probe_of(spi);
324 r = lb035q02_probe_pdata(spi); 288 if (r)
325 if (r) 289 return r;
326 return r;
327 } else if (spi->dev.of_node) {
328 r = lb035q02_probe_of(spi);
329 if (r)
330 return r;
331 } else {
332 return -ENODEV;
333 }
334 290
335 if (gpio_is_valid(ddata->backlight_gpio)) { 291 if (gpio_is_valid(ddata->backlight_gpio)) {
336 r = devm_gpio_request_one(&spi->dev, ddata->backlight_gpio, 292 r = devm_gpio_request_one(&spi->dev, ddata->backlight_gpio,
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c
index 8a928c9a2fc9..a2cbadd3eca3 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c
@@ -18,8 +18,7 @@
18#include <linux/gpio.h> 18#include <linux/gpio.h>
19#include <linux/of_gpio.h> 19#include <linux/of_gpio.h>
20 20
21#include <video/omapdss.h> 21#include <video/omapfb_dss.h>
22#include <video/omap-panel-data.h>
23 22
24struct panel_drv_data { 23struct panel_drv_data {
25 struct omap_dss_device dssdev; 24 struct omap_dss_device dssdev;
@@ -233,33 +232,6 @@ static struct omap_dss_driver nec_8048_ops = {
233}; 232};
234 233
235 234
236static int nec_8048_probe_pdata(struct spi_device *spi)
237{
238 const struct panel_nec_nl8048hl11_platform_data *pdata;
239 struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
240 struct omap_dss_device *dssdev, *in;
241
242 pdata = dev_get_platdata(&spi->dev);
243
244 ddata->qvga_gpio = pdata->qvga_gpio;
245 ddata->res_gpio = pdata->res_gpio;
246
247 in = omap_dss_find_output(pdata->source);
248 if (in == NULL) {
249 dev_err(&spi->dev, "failed to find video source '%s'\n",
250 pdata->source);
251 return -EPROBE_DEFER;
252 }
253 ddata->in = in;
254
255 ddata->data_lines = pdata->data_lines;
256
257 dssdev = &ddata->dssdev;
258 dssdev->name = pdata->name;
259
260 return 0;
261}
262
263static int nec_8048_probe_of(struct spi_device *spi) 235static int nec_8048_probe_of(struct spi_device *spi)
264{ 236{
265 struct device_node *node = spi->dev.of_node; 237 struct device_node *node = spi->dev.of_node;
@@ -296,6 +268,9 @@ static int nec_8048_probe(struct spi_device *spi)
296 268
297 dev_dbg(&spi->dev, "%s\n", __func__); 269 dev_dbg(&spi->dev, "%s\n", __func__);
298 270
271 if (!spi->dev.of_node)
272 return -ENODEV;
273
299 spi->mode = SPI_MODE_0; 274 spi->mode = SPI_MODE_0;
300 spi->bits_per_word = 32; 275 spi->bits_per_word = 32;
301 276
@@ -315,17 +290,9 @@ static int nec_8048_probe(struct spi_device *spi)
315 290
316 ddata->spi = spi; 291 ddata->spi = spi;
317 292
318 if (dev_get_platdata(&spi->dev)) { 293 r = nec_8048_probe_of(spi);
319 r = nec_8048_probe_pdata(spi); 294 if (r)
320 if (r) 295 return r;
321 return r;
322 } else if (spi->dev.of_node) {
323 r = nec_8048_probe_of(spi);
324 if (r)
325 return r;
326 } else {
327 return -ENODEV;
328 }
329 296
330 if (gpio_is_valid(ddata->qvga_gpio)) { 297 if (gpio_is_valid(ddata->qvga_gpio)) {
331 r = devm_gpio_request_one(&spi->dev, ddata->qvga_gpio, 298 r = devm_gpio_request_one(&spi->dev, ddata->qvga_gpio,
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
index 1954ec913ce5..a8be18a87fa0 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
@@ -17,8 +17,7 @@
17#include <linux/platform_device.h> 17#include <linux/platform_device.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/regulator/consumer.h> 19#include <linux/regulator/consumer.h>
20#include <video/omapdss.h> 20#include <video/omapfb_dss.h>
21#include <video/omap-panel-data.h>
22 21
23struct panel_drv_data { 22struct panel_drv_data {
24 struct omap_dss_device dssdev; 23 struct omap_dss_device dssdev;
@@ -197,69 +196,6 @@ static struct omap_dss_driver sharp_ls_ops = {
197 .get_resolution = omapdss_default_get_resolution, 196 .get_resolution = omapdss_default_get_resolution,
198}; 197};
199 198
200static int sharp_ls_get_gpio(struct device *dev, int gpio, unsigned long flags,
201 char *desc, struct gpio_desc **gpiod)
202{
203 int r;
204
205 r = devm_gpio_request_one(dev, gpio, flags, desc);
206 if (r) {
207 *gpiod = NULL;
208 return r == -ENOENT ? 0 : r;
209 }
210
211 *gpiod = gpio_to_desc(gpio);
212
213 return 0;
214}
215
216static int sharp_ls_probe_pdata(struct platform_device *pdev)
217{
218 const struct panel_sharp_ls037v7dw01_platform_data *pdata;
219 struct panel_drv_data *ddata = platform_get_drvdata(pdev);
220 struct omap_dss_device *dssdev, *in;
221 int r;
222
223 pdata = dev_get_platdata(&pdev->dev);
224
225 in = omap_dss_find_output(pdata->source);
226 if (in == NULL) {
227 dev_err(&pdev->dev, "failed to find video source '%s'\n",
228 pdata->source);
229 return -EPROBE_DEFER;
230 }
231
232 ddata->in = in;
233
234 ddata->data_lines = pdata->data_lines;
235
236 dssdev = &ddata->dssdev;
237 dssdev->name = pdata->name;
238
239 r = sharp_ls_get_gpio(&pdev->dev, pdata->mo_gpio, GPIOF_OUT_INIT_LOW,
240 "lcd MO", &ddata->mo_gpio);
241 if (r)
242 return r;
243 r = sharp_ls_get_gpio(&pdev->dev, pdata->lr_gpio, GPIOF_OUT_INIT_HIGH,
244 "lcd LR", &ddata->lr_gpio);
245 if (r)
246 return r;
247 r = sharp_ls_get_gpio(&pdev->dev, pdata->ud_gpio, GPIOF_OUT_INIT_HIGH,
248 "lcd UD", &ddata->ud_gpio);
249 if (r)
250 return r;
251 r = sharp_ls_get_gpio(&pdev->dev, pdata->resb_gpio, GPIOF_OUT_INIT_LOW,
252 "lcd RESB", &ddata->resb_gpio);
253 if (r)
254 return r;
255 r = sharp_ls_get_gpio(&pdev->dev, pdata->ini_gpio, GPIOF_OUT_INIT_LOW,
256 "lcd INI", &ddata->ini_gpio);
257 if (r)
258 return r;
259
260 return 0;
261}
262
263static int sharp_ls_get_gpio_of(struct device *dev, int index, int val, 199static int sharp_ls_get_gpio_of(struct device *dev, int index, int val,
264 const char *desc, struct gpio_desc **gpiod) 200 const char *desc, struct gpio_desc **gpiod)
265{ 201{
@@ -330,23 +266,18 @@ static int sharp_ls_probe(struct platform_device *pdev)
330 struct omap_dss_device *dssdev; 266 struct omap_dss_device *dssdev;
331 int r; 267 int r;
332 268
269 if (!pdev->dev.of_node)
270 return -ENODEV;
271
333 ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); 272 ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
334 if (ddata == NULL) 273 if (ddata == NULL)
335 return -ENOMEM; 274 return -ENOMEM;
336 275
337 platform_set_drvdata(pdev, ddata); 276 platform_set_drvdata(pdev, ddata);
338 277
339 if (dev_get_platdata(&pdev->dev)) { 278 r = sharp_ls_probe_of(pdev);
340 r = sharp_ls_probe_pdata(pdev); 279 if (r)
341 if (r) 280 return r;
342 return r;
343 } else if (pdev->dev.of_node) {
344 r = sharp_ls_probe_of(pdev);
345 if (r)
346 return r;
347 } else {
348 return -ENODEV;
349 }
350 281
351 ddata->videomode = sharp_ls_timings; 282 ddata->videomode = sharp_ls_timings;
352 283
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c
index 31efcca801bd..468560a6daae 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c
@@ -33,7 +33,7 @@
33#include <linux/of.h> 33#include <linux/of.h>
34#include <linux/of_gpio.h> 34#include <linux/of_gpio.h>
35 35
36#include <video/omapdss.h> 36#include <video/omapfb_dss.h>
37#include <video/omap-panel-data.h> 37#include <video/omap-panel-data.h>
38 38
39#define MIPID_CMD_READ_DISP_ID 0x04 39#define MIPID_CMD_READ_DISP_ID 0x04
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c
index 4d657f3ab679..b529a8c2b652 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c
@@ -28,8 +28,7 @@
28#include <linux/delay.h> 28#include <linux/delay.h>
29#include <linux/spi/spi.h> 29#include <linux/spi/spi.h>
30#include <linux/gpio.h> 30#include <linux/gpio.h>
31#include <video/omapdss.h> 31#include <video/omapfb_dss.h>
32#include <video/omap-panel-data.h>
33 32
34struct panel_drv_data { 33struct panel_drv_data {
35 struct omap_dss_device dssdev; 34 struct omap_dss_device dssdev;
@@ -365,31 +364,6 @@ static struct omap_dss_driver td028ttec1_ops = {
365 .check_timings = td028ttec1_panel_check_timings, 364 .check_timings = td028ttec1_panel_check_timings,
366}; 365};
367 366
368static int td028ttec1_panel_probe_pdata(struct spi_device *spi)
369{
370 const struct panel_tpo_td028ttec1_platform_data *pdata;
371 struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
372 struct omap_dss_device *dssdev, *in;
373
374 pdata = dev_get_platdata(&spi->dev);
375
376 in = omap_dss_find_output(pdata->source);
377 if (in == NULL) {
378 dev_err(&spi->dev, "failed to find video source '%s'\n",
379 pdata->source);
380 return -EPROBE_DEFER;
381 }
382
383 ddata->in = in;
384
385 ddata->data_lines = pdata->data_lines;
386
387 dssdev = &ddata->dssdev;
388 dssdev->name = pdata->name;
389
390 return 0;
391}
392
393static int td028ttec1_probe_of(struct spi_device *spi) 367static int td028ttec1_probe_of(struct spi_device *spi)
394{ 368{
395 struct device_node *node = spi->dev.of_node; 369 struct device_node *node = spi->dev.of_node;
@@ -415,6 +389,9 @@ static int td028ttec1_panel_probe(struct spi_device *spi)
415 389
416 dev_dbg(&spi->dev, "%s\n", __func__); 390 dev_dbg(&spi->dev, "%s\n", __func__);
417 391
392 if (!spi->dev.of_node)
393 return -ENODEV;
394
418 spi->bits_per_word = 9; 395 spi->bits_per_word = 9;
419 spi->mode = SPI_MODE_3; 396 spi->mode = SPI_MODE_3;
420 397
@@ -432,17 +409,9 @@ static int td028ttec1_panel_probe(struct spi_device *spi)
432 409
433 ddata->spi_dev = spi; 410 ddata->spi_dev = spi;
434 411
435 if (dev_get_platdata(&spi->dev)) { 412 r = td028ttec1_probe_of(spi);
436 r = td028ttec1_panel_probe_pdata(spi); 413 if (r)
437 if (r) 414 return r;
438 return r;
439 } else if (spi->dev.of_node) {
440 r = td028ttec1_probe_of(spi);
441 if (r)
442 return r;
443 } else {
444 return -ENODEV;
445 }
446 415
447 ddata->videomode = td028ttec1_panel_timings; 416 ddata->videomode = td028ttec1_panel_timings;
448 417
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
index 68e3b68a2920..51e628b85f4a 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
@@ -19,8 +19,7 @@
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/of_gpio.h> 20#include <linux/of_gpio.h>
21 21
22#include <video/omapdss.h> 22#include <video/omapfb_dss.h>
23#include <video/omap-panel-data.h>
24 23
25#define TPO_R02_MODE(x) ((x) & 7) 24#define TPO_R02_MODE(x) ((x) & 7)
26#define TPO_R02_MODE_800x480 7 25#define TPO_R02_MODE_800x480 7
@@ -465,32 +464,6 @@ static struct omap_dss_driver tpo_td043_ops = {
465}; 464};
466 465
467 466
468static int tpo_td043_probe_pdata(struct spi_device *spi)
469{
470 const struct panel_tpo_td043mtea1_platform_data *pdata;
471 struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
472 struct omap_dss_device *dssdev, *in;
473
474 pdata = dev_get_platdata(&spi->dev);
475
476 ddata->nreset_gpio = pdata->nreset_gpio;
477
478 in = omap_dss_find_output(pdata->source);
479 if (in == NULL) {
480 dev_err(&spi->dev, "failed to find video source '%s'\n",
481 pdata->source);
482 return -EPROBE_DEFER;
483 }
484 ddata->in = in;
485
486 ddata->data_lines = pdata->data_lines;
487
488 dssdev = &ddata->dssdev;
489 dssdev->name = pdata->name;
490
491 return 0;
492}
493
494static int tpo_td043_probe_of(struct spi_device *spi) 467static int tpo_td043_probe_of(struct spi_device *spi)
495{ 468{
496 struct device_node *node = spi->dev.of_node; 469 struct device_node *node = spi->dev.of_node;
@@ -524,6 +497,9 @@ static int tpo_td043_probe(struct spi_device *spi)
524 497
525 dev_dbg(&spi->dev, "%s\n", __func__); 498 dev_dbg(&spi->dev, "%s\n", __func__);
526 499
500 if (!spi->dev.of_node)
501 return -ENODEV;
502
527 spi->bits_per_word = 16; 503 spi->bits_per_word = 16;
528 spi->mode = SPI_MODE_0; 504 spi->mode = SPI_MODE_0;
529 505
@@ -541,17 +517,9 @@ static int tpo_td043_probe(struct spi_device *spi)
541 517
542 ddata->spi = spi; 518 ddata->spi = spi;
543 519
544 if (dev_get_platdata(&spi->dev)) { 520 r = tpo_td043_probe_of(spi);
545 r = tpo_td043_probe_pdata(spi); 521 if (r)
546 if (r) 522 return r;
547 return r;
548 } else if (spi->dev.of_node) {
549 r = tpo_td043_probe_of(spi);
550 if (r)
551 return r;
552 } else {
553 return -ENODEV;
554 }
555 523
556 ddata->mode = TPO_R02_MODE_800x480; 524 ddata->mode = TPO_R02_MODE_800x480;
557 memcpy(ddata->gamma, tpo_td043_def_gamma, sizeof(ddata->gamma)); 525 memcpy(ddata->gamma, tpo_td043_def_gamma, sizeof(ddata->gamma));
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/apply.c b/drivers/video/fbdev/omap2/omapfb/dss/apply.c
index 663ccc3bf4e5..2481f4871f66 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/apply.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/apply.c
@@ -23,7 +23,7 @@
23#include <linux/spinlock.h> 23#include <linux/spinlock.h>
24#include <linux/jiffies.h> 24#include <linux/jiffies.h>
25 25
26#include <video/omapdss.h> 26#include <video/omapfb_dss.h>
27 27
28#include "dss.h" 28#include "dss.h"
29#include "dss_features.h" 29#include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/core.c b/drivers/video/fbdev/omap2/omapfb/dss/core.c
index 5a87179b7312..29de4827589d 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/core.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/core.c
@@ -35,7 +35,7 @@
35#include <linux/suspend.h> 35#include <linux/suspend.h>
36#include <linux/slab.h> 36#include <linux/slab.h>
37 37
38#include <video/omapdss.h> 38#include <video/omapfb_dss.h>
39 39
40#include "dss.h" 40#include "dss.h"
41#include "dss_features.h" 41#include "dss_features.h"
@@ -208,8 +208,6 @@ static int __init omap_dss_probe(struct platform_device *pdev)
208 core.default_display_name = def_disp_name; 208 core.default_display_name = def_disp_name;
209 else if (pdata->default_display_name) 209 else if (pdata->default_display_name)
210 core.default_display_name = pdata->default_display_name; 210 core.default_display_name = pdata->default_display_name;
211 else if (pdata->default_device)
212 core.default_display_name = pdata->default_device->name;
213 211
214 register_pm_notifier(&omap_dss_pm_notif_block); 212 register_pm_notifier(&omap_dss_pm_notif_block);
215 213
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c
index 6607db37a5e4..3691bde4ce0a 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c
@@ -26,7 +26,7 @@
26#include <linux/interrupt.h> 26#include <linux/interrupt.h>
27#include <linux/seq_file.h> 27#include <linux/seq_file.h>
28 28
29#include <video/omapdss.h> 29#include <video/omapfb_dss.h>
30 30
31#include "dss.h" 31#include "dss.h"
32#include "dss_features.h" 32#include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
index 5491e304f4fe..7a75dfda9845 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
@@ -41,7 +41,7 @@
41#include <linux/of.h> 41#include <linux/of.h>
42#include <linux/component.h> 42#include <linux/component.h>
43 43
44#include <video/omapdss.h> 44#include <video/omapfb_dss.h>
45 45
46#include "dss.h" 46#include "dss.h"
47#include "dss_features.h" 47#include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc_coefs.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc_coefs.c
index 038c15b04215..59c9a5c47ca9 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dispc_coefs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc_coefs.c
@@ -18,7 +18,7 @@
18 */ 18 */
19 19
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <video/omapdss.h> 21#include <video/omapfb_dss.h>
22 22
23#include "dispc.h" 23#include "dispc.h"
24 24
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c
index 75b5286029ee..b3fdbfd0b82d 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c
@@ -25,7 +25,7 @@
25#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26#include <linux/sysfs.h> 26#include <linux/sysfs.h>
27 27
28#include <video/omapdss.h> 28#include <video/omapfb_dss.h>
29#include "dss.h" 29#include "dss.h"
30 30
31static ssize_t display_name_show(struct omap_dss_device *dssdev, char *buf) 31static ssize_t display_name_show(struct omap_dss_device *dssdev, char *buf)
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/display.c b/drivers/video/fbdev/omap2/omapfb/dss/display.c
index ef5b9027985d..dd5468695c43 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/display.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/display.c
@@ -28,7 +28,7 @@
28#include <linux/platform_device.h> 28#include <linux/platform_device.h>
29#include <linux/of.h> 29#include <linux/of.h>
30 30
31#include <video/omapdss.h> 31#include <video/omapfb_dss.h>
32#include "dss.h" 32#include "dss.h"
33#include "dss_features.h" 33#include "dss_features.h"
34 34
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dpi.c b/drivers/video/fbdev/omap2/omapfb/dss/dpi.c
index 7953e6a52346..da09806b940c 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dpi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dpi.c
@@ -34,7 +34,7 @@
34#include <linux/clk.h> 34#include <linux/clk.h>
35#include <linux/component.h> 35#include <linux/component.h>
36 36
37#include <video/omapdss.h> 37#include <video/omapfb_dss.h>
38 38
39#include "dss.h" 39#include "dss.h"
40#include "dss_features.h" 40#include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
index d63e59807707..9e4800a4e3d1 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
@@ -42,7 +42,7 @@
42#include <linux/of_platform.h> 42#include <linux/of_platform.h>
43#include <linux/component.h> 43#include <linux/component.h>
44 44
45#include <video/omapdss.h> 45#include <video/omapfb_dss.h>
46#include <video/mipi_display.h> 46#include <video/mipi_display.h>
47 47
48#include "dss.h" 48#include "dss.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
index bf407b6ba15c..d356a252ab4a 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
@@ -18,7 +18,7 @@
18#include <linux/of.h> 18#include <linux/of.h>
19#include <linux/seq_file.h> 19#include <linux/seq_file.h>
20 20
21#include <video/omapdss.h> 21#include <video/omapfb_dss.h>
22 22
23#include "dss.h" 23#include "dss.h"
24 24
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.c b/drivers/video/fbdev/omap2/omapfb/dss/dss.c
index 0078c4d1fc31..47d7f69ad9ad 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.c
@@ -41,7 +41,7 @@
41#include <linux/suspend.h> 41#include <linux/suspend.h>
42#include <linux/component.h> 42#include <linux/component.h>
43 43
44#include <video/omapdss.h> 44#include <video/omapfb_dss.h>
45 45
46#include "dss.h" 46#include "dss.h"
47#include "dss_features.h" 47#include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.h b/drivers/video/fbdev/omap2/omapfb/dss/dss.h
index 0184a8461df1..a3cc0ca8f9d2 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss.h
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.h
@@ -73,6 +73,17 @@
73#define FLD_MOD(orig, val, start, end) \ 73#define FLD_MOD(orig, val, start, end) \
74 (((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end)) 74 (((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))
75 75
76enum omap_dss_clk_source {
77 OMAP_DSS_CLK_SRC_FCK = 0, /* OMAP2/3: DSS1_ALWON_FCLK
78 * OMAP4: DSS_FCLK */
79 OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC, /* OMAP3: DSI1_PLL_FCLK
80 * OMAP4: PLL1_CLK1 */
81 OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI, /* OMAP3: DSI2_PLL_FCLK
82 * OMAP4: PLL1_CLK2 */
83 OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC, /* OMAP4: PLL2_CLK1 */
84 OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI, /* OMAP4: PLL2_CLK2 */
85};
86
76enum dss_io_pad_mode { 87enum dss_io_pad_mode {
77 DSS_IO_PAD_MODE_RESET, 88 DSS_IO_PAD_MODE_RESET,
78 DSS_IO_PAD_MODE_RFBI, 89 DSS_IO_PAD_MODE_RFBI,
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c b/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c
index c886a2927f73..8fc843b56b26 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c
@@ -23,7 +23,7 @@
23#include <linux/err.h> 23#include <linux/err.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25 25
26#include <video/omapdss.h> 26#include <video/omapfb_dss.h>
27 27
28#include "dss.h" 28#include "dss.h"
29#include "dss_features.h" 29#include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi.h b/drivers/video/fbdev/omap2/omapfb/dss/hdmi.h
index 53616b02b613..f6de87e078b0 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi.h
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi.h
@@ -23,7 +23,8 @@
23#include <linux/io.h> 23#include <linux/io.h>
24#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25#include <linux/hdmi.h> 25#include <linux/hdmi.h>
26#include <video/omapdss.h> 26#include <video/omapfb_dss.h>
27#include <sound/omap-hdmi-audio.h>
27 28
28#include "dss.h" 29#include "dss.h"
29 30
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
index 2e71aec838b1..926a6f20dbb2 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
@@ -33,7 +33,7 @@
33#include <linux/gpio.h> 33#include <linux/gpio.h>
34#include <linux/regulator/consumer.h> 34#include <linux/regulator/consumer.h>
35#include <linux/component.h> 35#include <linux/component.h>
36#include <video/omapdss.h> 36#include <video/omapfb_dss.h>
37#include <sound/omap-hdmi-audio.h> 37#include <sound/omap-hdmi-audio.h>
38 38
39#include "hdmi4_core.h" 39#include "hdmi4_core.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
index aade6d99662a..0ee829a165c3 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
@@ -38,7 +38,7 @@
38#include <linux/gpio.h> 38#include <linux/gpio.h>
39#include <linux/regulator/consumer.h> 39#include <linux/regulator/consumer.h>
40#include <linux/component.h> 40#include <linux/component.h>
41#include <video/omapdss.h> 41#include <video/omapfb_dss.h>
42#include <sound/omap-hdmi-audio.h> 42#include <sound/omap-hdmi-audio.h>
43 43
44#include "hdmi5_core.h" 44#include "hdmi5_core.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
index 8ea531d2652c..bbfe7e2d4332 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
@@ -51,8 +51,8 @@ static void hdmi_core_ddc_init(struct hdmi_core_data *core)
51{ 51{
52 void __iomem *base = core->base; 52 void __iomem *base = core->base;
53 const unsigned long long iclk = 266000000; /* DSS L3 ICLK */ 53 const unsigned long long iclk = 266000000; /* DSS L3 ICLK */
54 const unsigned ss_scl_high = 4000; /* ns */ 54 const unsigned ss_scl_high = 4600; /* ns */
55 const unsigned ss_scl_low = 4700; /* ns */ 55 const unsigned ss_scl_low = 5400; /* ns */
56 const unsigned fs_scl_high = 600; /* ns */ 56 const unsigned fs_scl_high = 600; /* ns */
57 const unsigned fs_scl_low = 1300; /* ns */ 57 const unsigned fs_scl_low = 1300; /* ns */
58 const unsigned sda_hold = 1000; /* ns */ 58 const unsigned sda_hold = 1000; /* ns */
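
Note: the DDC timing change widens the standard-mode SCL high/low targets beyond the bare I2C minimums (4.0/4.7 µs). A hedged sketch of why margin helps — assuming the counts come from integer division of the 266 MHz ICLK, which may not match the hdmi5 core's exact register math:

    #include <stdio.h>

    int main(void)
    {
        const unsigned long long iclk = 266000000ULL;  /* DSS L3 ICLK, Hz */
        const unsigned ns[] = { 4000, 4600, 4700, 5400 };

        for (int i = 0; i < 4; i++) {
            unsigned long long cycles = (ns[i] * iclk) / 1000000000ULL;
            /* rounding down eats margin; 4600/5400 keep the realized
               SCL times at or above the 4000/4700 ns spec minimums */
            printf("%u ns -> %llu cycles (%llu ns realized)\n",
                   ns[i], cycles, cycles * 1000000000ULL / iclk);
        }
        return 0;
    }
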
@@ -442,7 +442,7 @@ static void hdmi_core_write_avi_infoframe(struct hdmi_core_data *core,
442 442
443 c = (ptr[1] >> 6) & 0x3; 443 c = (ptr[1] >> 6) & 0x3;
444 m = (ptr[1] >> 4) & 0x3; 444 m = (ptr[1] >> 4) & 0x3;
445 r = (ptr[1] >> 0) & 0x3; 445 r = (ptr[1] >> 0) & 0xf;
446 446
447 itc = (ptr[2] >> 7) & 0x1; 447 itc = (ptr[2] >> 7) & 0x1;
448 ec = (ptr[2] >> 4) & 0x7; 448 ec = (ptr[2] >> 4) & 0x7;
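
Note: the mask fix reflects the CEA-861 AVI InfoFrame layout: byte 1 packs C (2 bits), M (2 bits) and the 4-bit R (active format aspect) code, so masking with 0x3 silently dropped half the field. Sketch of the unpacking:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t b1 = 0x2a;                 /* example packed byte */
        unsigned c = (b1 >> 6) & 0x3;      /* colorimetry, 2 bits */
        unsigned m = (b1 >> 4) & 0x3;      /* picture aspect, 2 bits */
        unsigned r = (b1 >> 0) & 0xf;      /* active format aspect, 4 bits */

        printf("c=%u m=%u r=%u\n", c, m, r);
        return 0;
    }
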
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c
index 1b8fcc6c4ba1..189a5ad125a3 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c
@@ -4,7 +4,7 @@
4#include <linux/kernel.h> 4#include <linux/kernel.h>
5#include <linux/err.h> 5#include <linux/err.h>
6#include <linux/of.h> 6#include <linux/of.h>
7#include <video/omapdss.h> 7#include <video/omapfb_dss.h>
8 8
9#include "hdmi.h" 9#include "hdmi.h"
10 10
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c
index 1f5d19c119ce..9a13c35fd6d8 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c
@@ -13,7 +13,7 @@
13#include <linux/io.h> 13#include <linux/io.h>
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <video/omapdss.h> 16#include <video/omapfb_dss.h>
17 17
18#include "dss.h" 18#include "dss.h"
19#include "hdmi.h" 19#include "hdmi.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c
index 06e23a7c432c..eac3665aba6c 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c
@@ -17,7 +17,7 @@
17#include <linux/platform_device.h> 17#include <linux/platform_device.h>
18#include <linux/clk.h> 18#include <linux/clk.h>
19 19
20#include <video/omapdss.h> 20#include <video/omapfb_dss.h>
21 21
22#include "dss.h" 22#include "dss.h"
23#include "hdmi.h" 23#include "hdmi.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c
index 7c544bc56fb5..705373e4cf38 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c
@@ -14,7 +14,7 @@
14#include <linux/err.h> 14#include <linux/err.h>
15#include <linux/io.h> 15#include <linux/io.h>
16#include <linux/platform_device.h> 16#include <linux/platform_device.h>
17#include <video/omapdss.h> 17#include <video/omapfb_dss.h>
18 18
19#include "dss.h" 19#include "dss.h"
20#include "hdmi.h" 20#include "hdmi.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
index a7414fb12830..9e2a67fdf4d2 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
@@ -26,7 +26,7 @@
26#include <linux/platform_device.h> 26#include <linux/platform_device.h>
27#include <linux/jiffies.h> 27#include <linux/jiffies.h>
28 28
29#include <video/omapdss.h> 29#include <video/omapfb_dss.h>
30 30
31#include "dss.h" 31#include "dss.h"
32#include "dss_features.h" 32#include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/manager.c b/drivers/video/fbdev/omap2/omapfb/dss/manager.c
index 08a67f4f6a20..69f86d2cc274 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/manager.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/manager.c
@@ -28,7 +28,7 @@
28#include <linux/platform_device.h> 28#include <linux/platform_device.h>
29#include <linux/jiffies.h> 29#include <linux/jiffies.h>
30 30
31#include <video/omapdss.h> 31#include <video/omapfb_dss.h>
32 32
33#include "dss.h" 33#include "dss.h"
34#include "dss_features.h" 34#include "dss_features.h"
@@ -69,7 +69,6 @@ int dss_init_overlay_managers(void)
69 break; 69 break;
70 } 70 }
71 71
72 mgr->caps = 0;
73 mgr->supported_displays = 72 mgr->supported_displays =
74 dss_feat_get_supported_displays(mgr->id); 73 dss_feat_get_supported_displays(mgr->id);
75 mgr->supported_outputs = 74 mgr->supported_outputs =
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/output.c b/drivers/video/fbdev/omap2/omapfb/dss/output.c
index 16072159bd24..bed9a978269d 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/output.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/output.c
@@ -21,7 +21,7 @@
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/of.h> 22#include <linux/of.h>
23 23
24#include <video/omapdss.h> 24#include <video/omapfb_dss.h>
25 25
26#include "dss.h" 26#include "dss.h"
27 27
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c
index 4cc5ddebfb34..f1f6c0aea752 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c
@@ -26,7 +26,7 @@
26#include <linux/kobject.h> 26#include <linux/kobject.h>
27#include <linux/platform_device.h> 27#include <linux/platform_device.h>
28 28
29#include <video/omapdss.h> 29#include <video/omapfb_dss.h>
30 30
31#include "dss.h" 31#include "dss.h"
32#include "dss_features.h" 32#include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/overlay.c b/drivers/video/fbdev/omap2/omapfb/dss/overlay.c
index 2f7cee985cdd..d6c5d75d2ef8 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/overlay.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/overlay.c
@@ -30,7 +30,7 @@
30#include <linux/delay.h> 30#include <linux/delay.h>
31#include <linux/slab.h> 31#include <linux/slab.h>
32 32
33#include <video/omapdss.h> 33#include <video/omapfb_dss.h>
34 34
35#include "dss.h" 35#include "dss.h"
36#include "dss_features.h" 36#include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/pll.c b/drivers/video/fbdev/omap2/omapfb/dss/pll.c
index f974ddcd3b6e..0564c5606cd0 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/pll.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/pll.c
@@ -22,7 +22,7 @@
22#include <linux/regulator/consumer.h> 22#include <linux/regulator/consumer.h>
23#include <linux/sched.h> 23#include <linux/sched.h>
24 24
25#include <video/omapdss.h> 25#include <video/omapfb_dss.h>
26 26
27#include "dss.h" 27#include "dss.h"
28 28
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/rfbi.c b/drivers/video/fbdev/omap2/omapfb/dss/rfbi.c
index aea6a1d0fb20..562b0c4ae0c6 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/rfbi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/rfbi.c
@@ -38,7 +38,7 @@
38#include <linux/pm_runtime.h> 38#include <linux/pm_runtime.h>
39#include <linux/component.h> 39#include <linux/component.h>
40 40
41#include <video/omapdss.h> 41#include <video/omapfb_dss.h>
42#include "dss.h" 42#include "dss.h"
43 43
44struct rfbi_reg { u16 idx; }; 44struct rfbi_reg { u16 idx; };
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/sdi.c b/drivers/video/fbdev/omap2/omapfb/dss/sdi.c
index d747cc6b59e1..c4be732a4714 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/sdi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/sdi.c
@@ -29,7 +29,7 @@
29#include <linux/of.h> 29#include <linux/of.h>
30#include <linux/component.h> 30#include <linux/component.h>
31 31
32#include <video/omapdss.h> 32#include <video/omapfb_dss.h>
33#include "dss.h" 33#include "dss.h"
34 34
35static struct { 35static struct {
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/venc.c b/drivers/video/fbdev/omap2/omapfb/dss/venc.c
index 26e0ee30adf8..392464da12e4 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/venc.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/venc.c
@@ -37,7 +37,7 @@
37#include <linux/of.h> 37#include <linux/of.h>
38#include <linux/component.h> 38#include <linux/component.h>
39 39
40#include <video/omapdss.h> 40#include <video/omapfb_dss.h>
41 41
42#include "dss.h" 42#include "dss.h"
43#include "dss_features.h" 43#include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/video-pll.c b/drivers/video/fbdev/omap2/omapfb/dss/video-pll.c
index b1ec59e42940..a890540f2037 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/video-pll.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/video-pll.c
@@ -17,7 +17,7 @@
17#include <linux/platform_device.h> 17#include <linux/platform_device.h>
18#include <linux/sched.h> 18#include <linux/sched.h>
19 19
20#include <video/omapdss.h> 20#include <video/omapfb_dss.h>
21 21
22#include "dss.h" 22#include "dss.h"
23#include "dss_features.h" 23#include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
index 9ddfdd63b84c..ef69273074ba 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
@@ -30,7 +30,7 @@
30#include <linux/export.h> 30#include <linux/export.h>
31#include <linux/sizes.h> 31#include <linux/sizes.h>
32 32
33#include <video/omapdss.h> 33#include <video/omapfb_dss.h>
34#include <video/omapvrfb.h> 34#include <video/omapvrfb.h>
35 35
36#include "omapfb.h" 36#include "omapfb.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
index d3af01c94a58..2fb90cb6803f 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
@@ -30,7 +30,7 @@
30#include <linux/platform_device.h> 30#include <linux/platform_device.h>
31#include <linux/omapfb.h> 31#include <linux/omapfb.h>
32 32
33#include <video/omapdss.h> 33#include <video/omapfb_dss.h>
34#include <video/omapvrfb.h> 34#include <video/omapvrfb.h>
35 35
36#include "omapfb.h" 36#include "omapfb.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c b/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
index 18fa9e1d0033..8087a009c54f 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
@@ -29,7 +29,7 @@
29#include <linux/mm.h> 29#include <linux/mm.h>
30#include <linux/omapfb.h> 30#include <linux/omapfb.h>
31 31
32#include <video/omapdss.h> 32#include <video/omapfb_dss.h>
33#include <video/omapvrfb.h> 33#include <video/omapvrfb.h>
34 34
35#include "omapfb.h" 35#include "omapfb.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb.h b/drivers/video/fbdev/omap2/omapfb/omapfb.h
index 623cd872a367..bcb9ff4a607d 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb.h
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb.h
@@ -31,7 +31,7 @@
31#include <linux/dma-attrs.h> 31#include <linux/dma-attrs.h>
32#include <linux/dma-mapping.h> 32#include <linux/dma-mapping.h>
33 33
34#include <video/omapdss.h> 34#include <video/omapfb_dss.h>
35 35
36#ifdef DEBUG 36#ifdef DEBUG
37extern bool omapfb_debug; 37extern bool omapfb_debug;
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index b54f26c55dfd..b4b3e256491b 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -746,7 +746,7 @@ config ALIM7101_WDT
746 746
747config EBC_C384_WDT 747config EBC_C384_WDT
748 tristate "WinSystems EBC-C384 Watchdog Timer" 748 tristate "WinSystems EBC-C384 Watchdog Timer"
749 depends on X86 && ISA 749 depends on X86 && ISA_BUS_API
750 select WATCHDOG_CORE 750 select WATCHDOG_CORE
751 help 751 help
752 Enables watchdog timer support for the watchdog timer on the 752 Enables watchdog timer support for the watchdog timer on the
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index d46839f51e73..e4db19e88ab1 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -151,8 +151,6 @@ static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
151static void balloon_process(struct work_struct *work); 151static void balloon_process(struct work_struct *work);
152static DECLARE_DELAYED_WORK(balloon_worker, balloon_process); 152static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
153 153
154static void release_memory_resource(struct resource *resource);
155
156/* When ballooning out (allocating memory to return to Xen) we don't really 154/* When ballooning out (allocating memory to return to Xen) we don't really
157 want the kernel to try too hard since that can trigger the oom killer. */ 155 want the kernel to try too hard since that can trigger the oom killer. */
158#define GFP_BALLOON \ 156#define GFP_BALLOON \
@@ -248,6 +246,19 @@ static enum bp_state update_schedule(enum bp_state state)
248} 246}
249 247
250#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG 248#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
249static void release_memory_resource(struct resource *resource)
250{
251 if (!resource)
252 return;
253
254 /*
255 * No need to reset region to identity mapped since we now
256 * know that no I/O can be in this region
257 */
258 release_resource(resource);
259 kfree(resource);
260}
261
251static struct resource *additional_memory_resource(phys_addr_t size) 262static struct resource *additional_memory_resource(phys_addr_t size)
252{ 263{
253 struct resource *res; 264 struct resource *res;
@@ -286,19 +297,6 @@ static struct resource *additional_memory_resource(phys_addr_t size)
286 return res; 297 return res;
287} 298}
288 299
289static void release_memory_resource(struct resource *resource)
290{
291 if (!resource)
292 return;
293
294 /*
295 * No need to reset region to identity mapped since we now
296 * know that no I/O can be in this region
297 */
298 release_resource(resource);
299 kfree(resource);
300}
301
302static enum bp_state reserve_additional_memory(void) 300static enum bp_state reserve_additional_memory(void)
303{ 301{
304 long credit; 302 long credit;
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
index 8e67336f8ddd..6a25533da237 100644
--- a/drivers/xen/xen-pciback/conf_space.c
+++ b/drivers/xen/xen-pciback/conf_space.c
@@ -183,8 +183,7 @@ int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
183 field_start = OFFSET(cfg_entry); 183 field_start = OFFSET(cfg_entry);
184 field_end = OFFSET(cfg_entry) + field->size; 184 field_end = OFFSET(cfg_entry) + field->size;
185 185
186 if ((req_start >= field_start && req_start < field_end) 186 if (req_end > field_start && field_end > req_start) {
187 || (req_end > field_start && req_end <= field_end)) {
188 err = conf_space_read(dev, cfg_entry, field_start, 187 err = conf_space_read(dev, cfg_entry, field_start,
189 &tmp_val); 188 &tmp_val);
190 if (err) 189 if (err)
@@ -230,8 +229,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
230 field_start = OFFSET(cfg_entry); 229 field_start = OFFSET(cfg_entry);
231 field_end = OFFSET(cfg_entry) + field->size; 230 field_end = OFFSET(cfg_entry) + field->size;
232 231
233 if ((req_start >= field_start && req_start < field_end) 232 if (req_end > field_start && field_end > req_start) {
234 || (req_end > field_start && req_end <= field_end)) {
235 tmp_val = 0; 233 tmp_val = 0;
236 234
237 err = xen_pcibk_config_read(dev, field_start, 235 err = xen_pcibk_config_read(dev, field_start,
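
Note: both hunks replace the two-clause range test with the canonical overlap check for half-open intervals; the old form missed requests that fully contain a field. A self-contained check:

    #include <assert.h>
    #include <stdbool.h>

    /* standard overlap test for half-open ranges [start, end) */
    static bool overlaps(int req_start, int req_end,
                         int field_start, int field_end)
    {
        return req_end > field_start && field_end > req_start;
    }

    int main(void)
    {
        assert(overlaps(0, 8, 2, 4));   /* request contains field: the
                                           case the old test missed */
        assert(overlaps(2, 4, 0, 8));   /* field contains request */
        assert(!overlaps(0, 2, 2, 4));  /* touching, no overlap */
        return 0;
    }
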
diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
index ad3d17d29c81..9ead1c2ff1dd 100644
--- a/drivers/xen/xen-pciback/conf_space_header.c
+++ b/drivers/xen/xen-pciback/conf_space_header.c
@@ -145,7 +145,7 @@ static int rom_write(struct pci_dev *dev, int offset, u32 value, void *data)
145 /* A write to obtain the length must happen as a 32-bit write. 145 /* A write to obtain the length must happen as a 32-bit write.
146 * This does not (yet) support writing individual bytes 146 * This does not (yet) support writing individual bytes
147 */ 147 */
148 if (value == ~PCI_ROM_ADDRESS_ENABLE) 148 if ((value | ~PCI_ROM_ADDRESS_MASK) == ~0U)
149 bar->which = 1; 149 bar->which = 1;
150 else { 150 else {
151 u32 tmpval; 151 u32 tmpval;
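
Note: ROM BARs are sized by writing all address bits as ones; the enable bit (bit 0) may legitimately be either value, so the exact match against ~PCI_ROM_ADDRESS_ENABLE was too strict. Sketch of the new predicate, assuming the standard PCI_ROM_ADDRESS_MASK of 0xfffff800 (address in bits 31:11):

    #include <assert.h>
    #include <stdint.h>

    #define PCI_ROM_ADDRESS_MASK   0xfffff800u
    #define PCI_ROM_ADDRESS_ENABLE 0x1u

    static int is_size_probe(uint32_t value)
    {
        /* all address bits set => guest is sizing the ROM BAR */
        return (value | ~PCI_ROM_ADDRESS_MASK) == ~0u;
    }

    int main(void)
    {
        assert(is_size_probe(0xfffffffe));  /* the only value the old
                                               equality check accepted */
        assert(is_size_probe(0xffffffff));  /* enable bit set: now OK */
        assert(!is_size_probe(0xfffff000)); /* an address bit is clear */
        return 0;
    }
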
@@ -225,38 +225,42 @@ static inline void read_dev_bar(struct pci_dev *dev,
225 (PCI_BASE_ADDRESS_SPACE_MEMORY | 225 (PCI_BASE_ADDRESS_SPACE_MEMORY |
226 PCI_BASE_ADDRESS_MEM_TYPE_64))) { 226 PCI_BASE_ADDRESS_MEM_TYPE_64))) {
227 bar_info->val = res[pos - 1].start >> 32; 227 bar_info->val = res[pos - 1].start >> 32;
228 bar_info->len_val = res[pos - 1].end >> 32; 228 bar_info->len_val = -resource_size(&res[pos - 1]) >> 32;
229 return; 229 return;
230 } 230 }
231 } 231 }
232 232
233 if (!res[pos].flags ||
234 (res[pos].flags & (IORESOURCE_DISABLED | IORESOURCE_UNSET |
235 IORESOURCE_BUSY)))
236 return;
237
233 bar_info->val = res[pos].start | 238 bar_info->val = res[pos].start |
234 (res[pos].flags & PCI_REGION_FLAG_MASK); 239 (res[pos].flags & PCI_REGION_FLAG_MASK);
235 bar_info->len_val = resource_size(&res[pos]); 240 bar_info->len_val = -resource_size(&res[pos]) |
241 (res[pos].flags & PCI_REGION_FLAG_MASK);
236} 242}
237 243
238static void *bar_init(struct pci_dev *dev, int offset) 244static void *bar_init(struct pci_dev *dev, int offset)
239{ 245{
240 struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL); 246 struct pci_bar_info *bar = kzalloc(sizeof(*bar), GFP_KERNEL);
241 247
242 if (!bar) 248 if (!bar)
243 return ERR_PTR(-ENOMEM); 249 return ERR_PTR(-ENOMEM);
244 250
245 read_dev_bar(dev, bar, offset, ~0); 251 read_dev_bar(dev, bar, offset, ~0);
246 bar->which = 0;
247 252
248 return bar; 253 return bar;
249} 254}
250 255
251static void *rom_init(struct pci_dev *dev, int offset) 256static void *rom_init(struct pci_dev *dev, int offset)
252{ 257{
253 struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL); 258 struct pci_bar_info *bar = kzalloc(sizeof(*bar), GFP_KERNEL);
254 259
255 if (!bar) 260 if (!bar)
256 return ERR_PTR(-ENOMEM); 261 return ERR_PTR(-ENOMEM);
257 262
258 read_dev_bar(dev, bar, offset, ~PCI_ROM_ADDRESS_ENABLE); 263 read_dev_bar(dev, bar, offset, ~PCI_ROM_ADDRESS_ENABLE);
259 bar->which = 0;
260 264
261 return bar; 265 return bar;
262} 266}
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index f0d268b97d19..a439548de785 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -70,9 +70,13 @@ struct autofs_info {
70}; 70};
71 71
72#define AUTOFS_INF_EXPIRING (1<<0) /* dentry in the process of expiring */ 72#define AUTOFS_INF_EXPIRING (1<<0) /* dentry in the process of expiring */
73#define AUTOFS_INF_NO_RCU (1<<1) /* the dentry is being considered 73#define AUTOFS_INF_WANT_EXPIRE (1<<1) /* the dentry is being considered
74 * for expiry, so RCU_walk is 74 * for expiry, so RCU_walk is
75 * not permitted 75 * not permitted. If it progresses to
76 * actual expiry attempt, the flag is
77 * not cleared when EXPIRING is set -
78 * in that case it gets cleared only
79 * when it comes to clearing EXPIRING.
76 */ 80 */
77#define AUTOFS_INF_PENDING (1<<2) /* dentry pending mount */ 81#define AUTOFS_INF_PENDING (1<<2) /* dentry pending mount */
78 82
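
Note: the rename is more than cosmetic: WANT_EXPIRE now stays set across the whole expire window (it is no longer cleared when EXPIRING is set, only together with it), so waiters test a single flag and the smp_mb() ordering dance removed below becomes unnecessary. Illustrative lifecycle, with the sketch's own flag values:

    #include <assert.h>

    #define INF_EXPIRING    (1 << 0)
    #define INF_WANT_EXPIRE (1 << 1)

    int main(void)
    {
        unsigned flags = 0;

        flags |= INF_WANT_EXPIRE;           /* candidate selected */
        /* ... synchronize_rcu(), re-check busy ... */
        flags |= INF_EXPIRING;              /* expiry in progress;
                                               WANT_EXPIRE stays set */
        assert(flags & INF_WANT_EXPIRE);    /* one test covers the
                                               whole window */

        flags &= ~(INF_EXPIRING | INF_WANT_EXPIRE); /* cleared together */
        assert(!(flags & INF_WANT_EXPIRE));
        return 0;
    }
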
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index 9510d8d2e9cd..b493909e7492 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -316,19 +316,17 @@ struct dentry *autofs4_expire_direct(struct super_block *sb,
316 if (ino->flags & AUTOFS_INF_PENDING) 316 if (ino->flags & AUTOFS_INF_PENDING)
317 goto out; 317 goto out;
318 if (!autofs4_direct_busy(mnt, root, timeout, do_now)) { 318 if (!autofs4_direct_busy(mnt, root, timeout, do_now)) {
319 ino->flags |= AUTOFS_INF_NO_RCU; 319 ino->flags |= AUTOFS_INF_WANT_EXPIRE;
320 spin_unlock(&sbi->fs_lock); 320 spin_unlock(&sbi->fs_lock);
321 synchronize_rcu(); 321 synchronize_rcu();
322 spin_lock(&sbi->fs_lock); 322 spin_lock(&sbi->fs_lock);
323 if (!autofs4_direct_busy(mnt, root, timeout, do_now)) { 323 if (!autofs4_direct_busy(mnt, root, timeout, do_now)) {
324 ino->flags |= AUTOFS_INF_EXPIRING; 324 ino->flags |= AUTOFS_INF_EXPIRING;
325 smp_mb();
326 ino->flags &= ~AUTOFS_INF_NO_RCU;
327 init_completion(&ino->expire_complete); 325 init_completion(&ino->expire_complete);
328 spin_unlock(&sbi->fs_lock); 326 spin_unlock(&sbi->fs_lock);
329 return root; 327 return root;
330 } 328 }
331 ino->flags &= ~AUTOFS_INF_NO_RCU; 329 ino->flags &= ~AUTOFS_INF_WANT_EXPIRE;
332 } 330 }
333out: 331out:
334 spin_unlock(&sbi->fs_lock); 332 spin_unlock(&sbi->fs_lock);
@@ -446,7 +444,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
446 while ((dentry = get_next_positive_subdir(dentry, root))) { 444 while ((dentry = get_next_positive_subdir(dentry, root))) {
447 spin_lock(&sbi->fs_lock); 445 spin_lock(&sbi->fs_lock);
448 ino = autofs4_dentry_ino(dentry); 446 ino = autofs4_dentry_ino(dentry);
449 if (ino->flags & AUTOFS_INF_NO_RCU) 447 if (ino->flags & AUTOFS_INF_WANT_EXPIRE)
450 expired = NULL; 448 expired = NULL;
451 else 449 else
452 expired = should_expire(dentry, mnt, timeout, how); 450 expired = should_expire(dentry, mnt, timeout, how);
@@ -455,7 +453,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
455 continue; 453 continue;
456 } 454 }
457 ino = autofs4_dentry_ino(expired); 455 ino = autofs4_dentry_ino(expired);
458 ino->flags |= AUTOFS_INF_NO_RCU; 456 ino->flags |= AUTOFS_INF_WANT_EXPIRE;
459 spin_unlock(&sbi->fs_lock); 457 spin_unlock(&sbi->fs_lock);
460 synchronize_rcu(); 458 synchronize_rcu();
461 spin_lock(&sbi->fs_lock); 459 spin_lock(&sbi->fs_lock);
@@ -465,7 +463,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
465 goto found; 463 goto found;
466 } 464 }
467 465
468 ino->flags &= ~AUTOFS_INF_NO_RCU; 466 ino->flags &= ~AUTOFS_INF_WANT_EXPIRE;
469 if (expired != dentry) 467 if (expired != dentry)
470 dput(expired); 468 dput(expired);
471 spin_unlock(&sbi->fs_lock); 469 spin_unlock(&sbi->fs_lock);
@@ -475,17 +473,8 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
475found: 473found:
476 pr_debug("returning %p %pd\n", expired, expired); 474 pr_debug("returning %p %pd\n", expired, expired);
477 ino->flags |= AUTOFS_INF_EXPIRING; 475 ino->flags |= AUTOFS_INF_EXPIRING;
478 smp_mb();
479 ino->flags &= ~AUTOFS_INF_NO_RCU;
480 init_completion(&ino->expire_complete); 476 init_completion(&ino->expire_complete);
481 spin_unlock(&sbi->fs_lock); 477 spin_unlock(&sbi->fs_lock);
482 spin_lock(&sbi->lookup_lock);
483 spin_lock(&expired->d_parent->d_lock);
484 spin_lock_nested(&expired->d_lock, DENTRY_D_LOCK_NESTED);
485 list_move(&expired->d_parent->d_subdirs, &expired->d_child);
486 spin_unlock(&expired->d_lock);
487 spin_unlock(&expired->d_parent->d_lock);
488 spin_unlock(&sbi->lookup_lock);
489 return expired; 478 return expired;
490} 479}
491 480
@@ -496,7 +485,7 @@ int autofs4_expire_wait(struct dentry *dentry, int rcu_walk)
496 int status; 485 int status;
497 486
498 /* Block on any pending expire */ 487 /* Block on any pending expire */
499 if (!(ino->flags & (AUTOFS_INF_EXPIRING | AUTOFS_INF_NO_RCU))) 488 if (!(ino->flags & AUTOFS_INF_WANT_EXPIRE))
500 return 0; 489 return 0;
501 if (rcu_walk) 490 if (rcu_walk)
502 return -ECHILD; 491 return -ECHILD;
@@ -554,7 +543,7 @@ int autofs4_expire_run(struct super_block *sb,
554 ino = autofs4_dentry_ino(dentry); 543 ino = autofs4_dentry_ino(dentry);
555 /* avoid rapid-fire expire attempts if expiry fails */ 544 /* avoid rapid-fire expire attempts if expiry fails */
556 ino->last_used = now; 545 ino->last_used = now;
557 ino->flags &= ~AUTOFS_INF_EXPIRING; 546 ino->flags &= ~(AUTOFS_INF_EXPIRING|AUTOFS_INF_WANT_EXPIRE);
558 complete_all(&ino->expire_complete); 547 complete_all(&ino->expire_complete);
559 spin_unlock(&sbi->fs_lock); 548 spin_unlock(&sbi->fs_lock);
560 549
@@ -583,7 +572,7 @@ int autofs4_do_expire_multi(struct super_block *sb, struct vfsmount *mnt,
583 spin_lock(&sbi->fs_lock); 572 spin_lock(&sbi->fs_lock);
584 /* avoid rapid-fire expire attempts if expiry fails */ 573 /* avoid rapid-fire expire attempts if expiry fails */
585 ino->last_used = now; 574 ino->last_used = now;
586 ino->flags &= ~AUTOFS_INF_EXPIRING; 575 ino->flags &= ~(AUTOFS_INF_EXPIRING|AUTOFS_INF_WANT_EXPIRE);
587 complete_all(&ino->expire_complete); 576 complete_all(&ino->expire_complete);
588 spin_unlock(&sbi->fs_lock); 577 spin_unlock(&sbi->fs_lock);
589 dput(dentry); 578 dput(dentry);
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 78bd80298528..3767f6641af1 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -458,7 +458,7 @@ static int autofs4_d_manage(struct dentry *dentry, bool rcu_walk)
458 */ 458 */
459 struct inode *inode; 459 struct inode *inode;
460 460
461 if (ino->flags & (AUTOFS_INF_EXPIRING | AUTOFS_INF_NO_RCU)) 461 if (ino->flags & AUTOFS_INF_WANT_EXPIRE)
462 return 0; 462 return 0;
463 if (d_mountpoint(dentry)) 463 if (d_mountpoint(dentry))
464 return 0; 464 return 0;
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index 0146d911f468..631f1554c87b 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -66,11 +66,12 @@ static int autofs4_write(struct autofs_sb_info *sbi,
66 set_fs(KERNEL_DS); 66 set_fs(KERNEL_DS);
67 67
68 mutex_lock(&sbi->pipe_mutex); 68 mutex_lock(&sbi->pipe_mutex);
69 wr = __vfs_write(file, data, bytes, &file->f_pos); 69 while (bytes) {
70 while (bytes && wr) { 70 wr = __vfs_write(file, data, bytes, &file->f_pos);
71 if (wr <= 0)
72 break;
71 data += wr; 73 data += wr;
72 bytes -= wr; 74 bytes -= wr;
73 wr = __vfs_write(file, data, bytes, &file->f_pos);
74 } 75 }
75 mutex_unlock(&sbi->pipe_mutex); 76 mutex_unlock(&sbi->pipe_mutex);
76 77
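
Note: the rewritten pipe write is the standard write-all loop: the old version applied data += wr and bytes -= wr even when wr went negative, corrupting the accounting; checking wr <= 0 before adjusting fixes that. Userspace equivalent:

    #include <unistd.h>

    static ssize_t write_all(int fd, const char *data, size_t bytes)
    {
        size_t total = 0;

        while (bytes) {
            ssize_t wr = write(fd, data, bytes);
            if (wr <= 0)    /* error or zero-length write: stop before a
                               negative wr can poison the accounting */
                return wr;
            data  += wr;
            bytes -= wr;
            total += wr;
        }
        return total;
    }

    int main(void)
    {
        const char msg[] = "hello\n";
        return write_all(1, msg, sizeof(msg) - 1) < 0;
    }
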
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index e158b22ef32f..a7a28110dc80 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -2275,7 +2275,7 @@ static int elf_core_dump(struct coredump_params *cprm)
2275 goto end_coredump; 2275 goto end_coredump;
2276 2276
2277 /* Align to page */ 2277 /* Align to page */
2278 if (!dump_skip(cprm, dataoff - cprm->file->f_pos)) 2278 if (!dump_skip(cprm, dataoff - cprm->pos))
2279 goto end_coredump; 2279 goto end_coredump;
2280 2280
2281 for (i = 0, vma = first_vma(current, gate_vma); vma != NULL; 2281 for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 71ade0e556b7..203589311bf8 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -1787,7 +1787,7 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
1787 goto end_coredump; 1787 goto end_coredump;
1788 } 1788 }
1789 1789
1790 if (!dump_skip(cprm, dataoff - cprm->file->f_pos)) 1790 if (!dump_skip(cprm, dataoff - cprm->pos))
1791 goto end_coredump; 1791 goto end_coredump;
1792 1792
1793 if (!elf_fdpic_dump_segments(cprm)) 1793 if (!elf_fdpic_dump_segments(cprm))
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index b677a6ea6001..7706c8dc5fa6 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -2645,7 +2645,7 @@ static void btrfsic_dump_tree_sub(const struct btrfsic_state *state,
2645 * This algorithm is recursive because the amount of used stack space 2645 * This algorithm is recursive because the amount of used stack space
2646 * is very small and the max recursion depth is limited. 2646 * is very small and the max recursion depth is limited.
2647 */ 2647 */
2648 indent_add = sprintf(buf, "%c-%llu(%s/%llu/%d)", 2648 indent_add = sprintf(buf, "%c-%llu(%s/%llu/%u)",
2649 btrfsic_get_block_type(state, block), 2649 btrfsic_get_block_type(state, block),
2650 block->logical_bytenr, block->dev_state->name, 2650 block->logical_bytenr, block->dev_state->name,
2651 block->dev_bytenr, block->mirror_num); 2651 block->dev_bytenr, block->mirror_num);
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 427c36b430a6..a85cf7d23309 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1373,7 +1373,8 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
1373 1373
1374 if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) { 1374 if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1375 BUG_ON(tm->slot != 0); 1375 BUG_ON(tm->slot != 0);
1376 eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start); 1376 eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start,
1377 eb->len);
1377 if (!eb_rewin) { 1378 if (!eb_rewin) {
1378 btrfs_tree_read_unlock_blocking(eb); 1379 btrfs_tree_read_unlock_blocking(eb);
1379 free_extent_buffer(eb); 1380 free_extent_buffer(eb);
@@ -1454,7 +1455,8 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
1454 } else if (old_root) { 1455 } else if (old_root) {
1455 btrfs_tree_read_unlock(eb_root); 1456 btrfs_tree_read_unlock(eb_root);
1456 free_extent_buffer(eb_root); 1457 free_extent_buffer(eb_root);
1457 eb = alloc_dummy_extent_buffer(root->fs_info, logical); 1458 eb = alloc_dummy_extent_buffer(root->fs_info, logical,
1459 root->nodesize);
1458 } else { 1460 } else {
1459 btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK); 1461 btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
1460 eb = btrfs_clone_extent_buffer(eb_root); 1462 eb = btrfs_clone_extent_buffer(eb_root);
@@ -1552,6 +1554,7 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
1552 trans->transid, root->fs_info->generation); 1554 trans->transid, root->fs_info->generation);
1553 1555
1554 if (!should_cow_block(trans, root, buf)) { 1556 if (!should_cow_block(trans, root, buf)) {
1557 trans->dirty = true;
1555 *cow_ret = buf; 1558 *cow_ret = buf;
1556 return 0; 1559 return 0;
1557 } 1560 }
@@ -1783,10 +1786,12 @@ static noinline int generic_bin_search(struct extent_buffer *eb,
1783 if (!err) { 1786 if (!err) {
1784 tmp = (struct btrfs_disk_key *)(kaddr + offset - 1787 tmp = (struct btrfs_disk_key *)(kaddr + offset -
1785 map_start); 1788 map_start);
1786 } else { 1789 } else if (err == 1) {
1787 read_extent_buffer(eb, &unaligned, 1790 read_extent_buffer(eb, &unaligned,
1788 offset, sizeof(unaligned)); 1791 offset, sizeof(unaligned));
1789 tmp = &unaligned; 1792 tmp = &unaligned;
1793 } else {
1794 return err;
1790 } 1795 }
1791 1796
1792 } else { 1797 } else {
@@ -2510,6 +2515,8 @@ read_block_for_search(struct btrfs_trans_handle *trans,
2510 if (!btrfs_buffer_uptodate(tmp, 0, 0)) 2515 if (!btrfs_buffer_uptodate(tmp, 0, 0))
2511 ret = -EIO; 2516 ret = -EIO;
2512 free_extent_buffer(tmp); 2517 free_extent_buffer(tmp);
2518 } else {
2519 ret = PTR_ERR(tmp);
2513 } 2520 }
2514 return ret; 2521 return ret;
2515} 2522}
@@ -2773,8 +2780,10 @@ again:
2773 * then we don't want to set the path blocking, 2780 * then we don't want to set the path blocking,
2774 * so we test it here 2781 * so we test it here
2775 */ 2782 */
2776 if (!should_cow_block(trans, root, b)) 2783 if (!should_cow_block(trans, root, b)) {
2784 trans->dirty = true;
2777 goto cow_done; 2785 goto cow_done;
2786 }
2778 2787
2779 /* 2788 /*
2780 * must have write locks on this node and the 2789 * must have write locks on this node and the
@@ -2823,6 +2832,8 @@ cow_done:
2823 } 2832 }
2824 2833
2825 ret = key_search(b, key, level, &prev_cmp, &slot); 2834 ret = key_search(b, key, level, &prev_cmp, &slot);
2835 if (ret < 0)
2836 goto done;
2826 2837
2827 if (level != 0) { 2838 if (level != 0) {
2828 int dec = 0; 2839 int dec = 0;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 101c3cfd3f7c..4274a7bfdaed 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2518,7 +2518,7 @@ void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
2518int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, 2518int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2519 struct btrfs_root *root, unsigned long count); 2519 struct btrfs_root *root, unsigned long count);
2520int btrfs_async_run_delayed_refs(struct btrfs_root *root, 2520int btrfs_async_run_delayed_refs(struct btrfs_root *root,
2521 unsigned long count, int wait); 2521 unsigned long count, u64 transid, int wait);
2522int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len); 2522int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len);
2523int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, 2523int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
2524 struct btrfs_root *root, u64 bytenr, 2524 struct btrfs_root *root, u64 bytenr,
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 61561c2a3f96..d3aaabbfada0 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -1606,15 +1606,23 @@ int btrfs_inode_delayed_dir_index_count(struct inode *inode)
1606 return 0; 1606 return 0;
1607} 1607}
1608 1608
1609void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list, 1609bool btrfs_readdir_get_delayed_items(struct inode *inode,
1610 struct list_head *del_list) 1610 struct list_head *ins_list,
1611 struct list_head *del_list)
1611{ 1612{
1612 struct btrfs_delayed_node *delayed_node; 1613 struct btrfs_delayed_node *delayed_node;
1613 struct btrfs_delayed_item *item; 1614 struct btrfs_delayed_item *item;
1614 1615
1615 delayed_node = btrfs_get_delayed_node(inode); 1616 delayed_node = btrfs_get_delayed_node(inode);
1616 if (!delayed_node) 1617 if (!delayed_node)
1617 return; 1618 return false;
1619
1620 /*
1621 * We can only do one readdir with delayed items at a time because of
1622 * item->readdir_list.
1623 */
1624 inode_unlock_shared(inode);
1625 inode_lock(inode);
1618 1626
1619 mutex_lock(&delayed_node->mutex); 1627 mutex_lock(&delayed_node->mutex);
1620 item = __btrfs_first_delayed_insertion_item(delayed_node); 1628 item = __btrfs_first_delayed_insertion_item(delayed_node);
@@ -1641,10 +1649,13 @@ void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
1641 * requeue or dequeue this delayed node. 1649 * requeue or dequeue this delayed node.
1642 */ 1650 */
1643 atomic_dec(&delayed_node->refs); 1651 atomic_dec(&delayed_node->refs);
1652
1653 return true;
1644} 1654}
1645 1655
1646void btrfs_put_delayed_items(struct list_head *ins_list, 1656void btrfs_readdir_put_delayed_items(struct inode *inode,
1647 struct list_head *del_list) 1657 struct list_head *ins_list,
1658 struct list_head *del_list)
1648{ 1659{
1649 struct btrfs_delayed_item *curr, *next; 1660 struct btrfs_delayed_item *curr, *next;
1650 1661
@@ -1659,6 +1670,12 @@ void btrfs_put_delayed_items(struct list_head *ins_list,
1659 if (atomic_dec_and_test(&curr->refs)) 1670 if (atomic_dec_and_test(&curr->refs))
1660 kfree(curr); 1671 kfree(curr);
1661 } 1672 }
1673
1674 /*
1675 * The VFS is going to do up_read(), so we need to downgrade back to a
1676 * read lock.
1677 */
1678 downgrade_write(&inode->i_rwsem);
1662} 1679}
1663 1680
1664int btrfs_should_delete_dir_index(struct list_head *del_list, 1681int btrfs_should_delete_dir_index(struct list_head *del_list,
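
Note: btrfs can only run one readdir with delayed items at a time (item->readdir_list is single-use), so the get path upgrades the VFS-held shared i_rwsem to exclusive and the put path downgrades back so the VFS's up_read() still pairs. POSIX rwlocks offer neither atomic upgrade nor atomic downgrade, which this runnable sketch makes visible:

    #include <pthread.h>

    static pthread_rwlock_t lk = PTHREAD_RWLOCK_INITIALIZER;

    static void readdir_path(void)
    {
        pthread_rwlock_rdlock(&lk);     /* caller (the "VFS") holds shared */

        /* need exclusive: no upgrade in POSIX, so drop and retake */
        pthread_rwlock_unlock(&lk);
        pthread_rwlock_wrlock(&lk);
        /* ... snapshot the delayed items onto private lists ... */

        /* the kernel's downgrade_write() does this atomically; POSIX
           forces a gap between the two calls */
        pthread_rwlock_unlock(&lk);
        pthread_rwlock_rdlock(&lk);

        pthread_rwlock_unlock(&lk);     /* caller's up_read() equivalent */
    }

    int main(void) { readdir_path(); return 0; }
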
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
index 0167853c84ae..2495b3d4075f 100644
--- a/fs/btrfs/delayed-inode.h
+++ b/fs/btrfs/delayed-inode.h
@@ -137,10 +137,12 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root);
137void btrfs_destroy_delayed_inodes(struct btrfs_root *root); 137void btrfs_destroy_delayed_inodes(struct btrfs_root *root);
138 138
139/* Used for readdir() */ 139/* Used for readdir() */
140void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list, 140bool btrfs_readdir_get_delayed_items(struct inode *inode,
141 struct list_head *del_list); 141 struct list_head *ins_list,
142void btrfs_put_delayed_items(struct list_head *ins_list, 142 struct list_head *del_list);
143 struct list_head *del_list); 143void btrfs_readdir_put_delayed_items(struct inode *inode,
144 struct list_head *ins_list,
145 struct list_head *del_list);
144int btrfs_should_delete_dir_index(struct list_head *del_list, 146int btrfs_should_delete_dir_index(struct list_head *del_list,
145 u64 index); 147 u64 index);
146int btrfs_readdir_delayed_dir_index(struct dir_context *ctx, 148int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 6628fca9f4ed..60ce1190307b 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1098,7 +1098,7 @@ void readahead_tree_block(struct btrfs_root *root, u64 bytenr)
1098 struct inode *btree_inode = root->fs_info->btree_inode; 1098 struct inode *btree_inode = root->fs_info->btree_inode;
1099 1099
1100 buf = btrfs_find_create_tree_block(root, bytenr); 1100 buf = btrfs_find_create_tree_block(root, bytenr);
1101 if (!buf) 1101 if (IS_ERR(buf))
1102 return; 1102 return;
1103 read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree, 1103 read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
1104 buf, 0, WAIT_NONE, btree_get_extent, 0); 1104 buf, 0, WAIT_NONE, btree_get_extent, 0);
@@ -1114,7 +1114,7 @@ int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr,
1114 int ret; 1114 int ret;
1115 1115
1116 buf = btrfs_find_create_tree_block(root, bytenr); 1116 buf = btrfs_find_create_tree_block(root, bytenr);
1117 if (!buf) 1117 if (IS_ERR(buf))
1118 return 0; 1118 return 0;
1119 1119
1120 set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags); 1120 set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);
@@ -1147,7 +1147,8 @@ struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
1147 u64 bytenr) 1147 u64 bytenr)
1148{ 1148{
1149 if (btrfs_test_is_dummy_root(root)) 1149 if (btrfs_test_is_dummy_root(root))
1150 return alloc_test_extent_buffer(root->fs_info, bytenr); 1150 return alloc_test_extent_buffer(root->fs_info, bytenr,
1151 root->nodesize);
1151 return alloc_extent_buffer(root->fs_info, bytenr); 1152 return alloc_extent_buffer(root->fs_info, bytenr);
1152} 1153}
1153 1154
@@ -1171,8 +1172,8 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
1171 int ret; 1172 int ret;
1172 1173
1173 buf = btrfs_find_create_tree_block(root, bytenr); 1174 buf = btrfs_find_create_tree_block(root, bytenr);
1174 if (!buf) 1175 if (IS_ERR(buf))
1175 return ERR_PTR(-ENOMEM); 1176 return buf;
1176 1177
1177 ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid); 1178 ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
1178 if (ret) { 1179 if (ret) {
@@ -1314,14 +1315,16 @@ static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
1314 1315
1315#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 1316#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
1316/* Should only be used by the testing infrastructure */ 1317/* Should only be used by the testing infrastructure */
1317struct btrfs_root *btrfs_alloc_dummy_root(void) 1318struct btrfs_root *btrfs_alloc_dummy_root(u32 sectorsize, u32 nodesize)
1318{ 1319{
1319 struct btrfs_root *root; 1320 struct btrfs_root *root;
1320 1321
1321 root = btrfs_alloc_root(NULL, GFP_KERNEL); 1322 root = btrfs_alloc_root(NULL, GFP_KERNEL);
1322 if (!root) 1323 if (!root)
1323 return ERR_PTR(-ENOMEM); 1324 return ERR_PTR(-ENOMEM);
1324 __setup_root(4096, 4096, 4096, root, NULL, 1); 1325 /* We don't use the stripesize in selftest, set it as sectorsize */
1326 __setup_root(nodesize, sectorsize, sectorsize, root, NULL,
1327 BTRFS_ROOT_TREE_OBJECTID);
1325 set_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state); 1328 set_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state);
1326 root->alloc_bytenr = 0; 1329 root->alloc_bytenr = 0;
1327 1330
@@ -1803,6 +1806,13 @@ static int cleaner_kthread(void *arg)
1803 if (btrfs_need_cleaner_sleep(root)) 1806 if (btrfs_need_cleaner_sleep(root))
1804 goto sleep; 1807 goto sleep;
1805 1808
1809 /*
1810 * Do not do anything if we might cause open_ctree() to block
1811 * before we have finished mounting the filesystem.
1812 */
1813 if (!root->fs_info->open)
1814 goto sleep;
1815
1806 if (!mutex_trylock(&root->fs_info->cleaner_mutex)) 1816 if (!mutex_trylock(&root->fs_info->cleaner_mutex))
1807 goto sleep; 1817 goto sleep;
1808 1818
@@ -2517,7 +2527,6 @@ int open_ctree(struct super_block *sb,
2517 int num_backups_tried = 0; 2527 int num_backups_tried = 0;
2518 int backup_index = 0; 2528 int backup_index = 0;
2519 int max_active; 2529 int max_active;
2520 bool cleaner_mutex_locked = false;
2521 2530
2522 tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL); 2531 tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2523 chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL); 2532 chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
@@ -2797,7 +2806,7 @@ int open_ctree(struct super_block *sb,
2797 2806
2798 nodesize = btrfs_super_nodesize(disk_super); 2807 nodesize = btrfs_super_nodesize(disk_super);
2799 sectorsize = btrfs_super_sectorsize(disk_super); 2808 sectorsize = btrfs_super_sectorsize(disk_super);
2800 stripesize = btrfs_super_stripesize(disk_super); 2809 stripesize = sectorsize;
2801 fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids)); 2810 fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
2802 fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids)); 2811 fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
2803 2812
@@ -2996,13 +3005,6 @@ retry_root_backup:
2996 goto fail_sysfs; 3005 goto fail_sysfs;
2997 } 3006 }
2998 3007
2999 /*
3000 * Hold the cleaner_mutex thread here so that we don't block
3001 * for a long time on btrfs_recover_relocation. cleaner_kthread
3002 * will wait for us to finish mounting the filesystem.
3003 */
3004 mutex_lock(&fs_info->cleaner_mutex);
3005 cleaner_mutex_locked = true;
3006 fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root, 3008 fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
3007 "btrfs-cleaner"); 3009 "btrfs-cleaner");
3008 if (IS_ERR(fs_info->cleaner_kthread)) 3010 if (IS_ERR(fs_info->cleaner_kthread))
@@ -3062,8 +3064,10 @@ retry_root_backup:
3062 ret = btrfs_cleanup_fs_roots(fs_info); 3064 ret = btrfs_cleanup_fs_roots(fs_info);
3063 if (ret) 3065 if (ret)
3064 goto fail_qgroup; 3066 goto fail_qgroup;
3065 /* We locked cleaner_mutex before creating cleaner_kthread. */ 3067
3068 mutex_lock(&fs_info->cleaner_mutex);
3066 ret = btrfs_recover_relocation(tree_root); 3069 ret = btrfs_recover_relocation(tree_root);
3070 mutex_unlock(&fs_info->cleaner_mutex);
3067 if (ret < 0) { 3071 if (ret < 0) {
3068 btrfs_warn(fs_info, "failed to recover relocation: %d", 3072 btrfs_warn(fs_info, "failed to recover relocation: %d",
3069 ret); 3073 ret);
@@ -3071,8 +3075,6 @@ retry_root_backup:
3071 goto fail_qgroup; 3075 goto fail_qgroup;
3072 } 3076 }
3073 } 3077 }
3074 mutex_unlock(&fs_info->cleaner_mutex);
3075 cleaner_mutex_locked = false;
3076 3078
3077 location.objectid = BTRFS_FS_TREE_OBJECTID; 3079 location.objectid = BTRFS_FS_TREE_OBJECTID;
3078 location.type = BTRFS_ROOT_ITEM_KEY; 3080 location.type = BTRFS_ROOT_ITEM_KEY;
@@ -3186,10 +3188,6 @@ fail_cleaner:
3186 filemap_write_and_wait(fs_info->btree_inode->i_mapping); 3188 filemap_write_and_wait(fs_info->btree_inode->i_mapping);
3187 3189
3188fail_sysfs: 3190fail_sysfs:
3189 if (cleaner_mutex_locked) {
3190 mutex_unlock(&fs_info->cleaner_mutex);
3191 cleaner_mutex_locked = false;
3192 }
3193 btrfs_sysfs_remove_mounted(fs_info); 3191 btrfs_sysfs_remove_mounted(fs_info);
3194 3192
3195fail_fsdev_sysfs: 3193fail_fsdev_sysfs:
@@ -4130,6 +4128,16 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
4130 * Hint to catch really bogus numbers, bitflips or so, more exact checks are 4128 * Hint to catch really bogus numbers, bitflips or so, more exact checks are
4131 * done later 4129 * done later
4132 */ 4130 */
4131 if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
4132 btrfs_err(fs_info, "bytes_used is too small %llu",
4133 btrfs_super_bytes_used(sb));
4134 ret = -EINVAL;
4135 }
4136 if (!is_power_of_2(btrfs_super_stripesize(sb))) {
4137 btrfs_err(fs_info, "invalid stripesize %u",
4138 btrfs_super_stripesize(sb));
4139 ret = -EINVAL;
4140 }
4133 if (btrfs_super_num_devices(sb) > (1UL << 31)) 4141 if (btrfs_super_num_devices(sb) > (1UL << 31))
4134 printk(KERN_WARNING "BTRFS: suspicious number of devices: %llu\n", 4142 printk(KERN_WARNING "BTRFS: suspicious number of devices: %llu\n",
4135 btrfs_super_num_devices(sb)); 4143 btrfs_super_num_devices(sb));
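
The two new checks follow the usual early-validation pattern: accumulate -EINVAL rather than returning on the first failure, so every bogus field is reported in one pass. A standalone sketch of just those checks (the struct is a stand-in, not the on-disk format):

	#include <stdint.h>
	#include <stdio.h>

	#define EINVAL 22

	struct dummy_super {
		uint64_t bytes_used;
		uint32_t nodesize;
		uint32_t stripesize;
	};

	static int is_power_of_2(uint32_t n)
	{
		return n != 0 && (n & (n - 1)) == 0;
	}

	static int check_super(const struct dummy_super *sb)
	{
		int ret = 0;

		/* a fs with a tree root has used at least a few nodes */
		if (sb->bytes_used < 6ULL * sb->nodesize) {
			fprintf(stderr, "bytes_used is too small %llu\n",
				(unsigned long long)sb->bytes_used);
			ret = -EINVAL;
		}
		if (!is_power_of_2(sb->stripesize)) {
			fprintf(stderr, "invalid stripesize %u\n",
				sb->stripesize);
			ret = -EINVAL;
		}
		return ret;
	}

	int main(void)
	{
		struct dummy_super bad = { .bytes_used = 4096,
					   .nodesize = 16384,
					   .stripesize = 12288 };
		return check_super(&bad) ? 1 : 0;
	}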
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index 8e79d0070bcf..acba821499a9 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -90,7 +90,7 @@ void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
90void btrfs_free_fs_root(struct btrfs_root *root); 90void btrfs_free_fs_root(struct btrfs_root *root);
91 91
92#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 92#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
93struct btrfs_root *btrfs_alloc_dummy_root(void); 93struct btrfs_root *btrfs_alloc_dummy_root(u32 sectorsize, u32 nodesize);
94#endif 94#endif
95 95
96/* 96/*
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index a400951e8678..82b912a293ab 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2042,6 +2042,11 @@ int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
2042 struct btrfs_bio *bbio = NULL; 2042 struct btrfs_bio *bbio = NULL;
2043 2043
2044 2044
2045 /*
2046 * Avoid races with device replace and make sure our bbio has devices
2047 * associated with its stripes that don't go away while we are discarding.
2048 */
2049 btrfs_bio_counter_inc_blocked(root->fs_info);
2045 /* Tell the block device(s) that the sectors can be discarded */ 2050 /* Tell the block device(s) that the sectors can be discarded */
2046 ret = btrfs_map_block(root->fs_info, REQ_DISCARD, 2051 ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
2047 bytenr, &num_bytes, &bbio, 0); 2052 bytenr, &num_bytes, &bbio, 0);
@@ -2074,6 +2079,7 @@ int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
2074 } 2079 }
2075 btrfs_put_bbio(bbio); 2080 btrfs_put_bbio(bbio);
2076 } 2081 }
2082 btrfs_bio_counter_dec(root->fs_info);
2077 2083
2078 if (actual_bytes) 2084 if (actual_bytes)
2079 *actual_bytes = discarded_bytes; 2085 *actual_bytes = discarded_bytes;
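
btrfs_bio_counter_inc_blocked()/btrfs_bio_counter_dec() behave like a drainable in-flight counter: I/O paths hold it for the lifetime of their bbio, and device replace can freeze new entries and wait for the count to reach zero. A hedged pthread-based sketch of that shape (simplified, not the kernel's percpu implementation):

	#include <pthread.h>

	struct bio_counter {
		pthread_mutex_t lock;
		pthread_cond_t cond;
		long in_flight;
		int frozen;	/* set while a device replace runs */
	};

	static void bioc_inc_blocked(struct bio_counter *c)
	{
		pthread_mutex_lock(&c->lock);
		while (c->frozen)	/* block new I/O during replace */
			pthread_cond_wait(&c->cond, &c->lock);
		c->in_flight++;
		pthread_mutex_unlock(&c->lock);
	}

	static void bioc_dec(struct bio_counter *c)
	{
		pthread_mutex_lock(&c->lock);
		if (--c->in_flight == 0)
			pthread_cond_broadcast(&c->cond);
		pthread_mutex_unlock(&c->lock);
	}

	/* replace side: stop new entries, then drain the stragglers */
	static void bioc_freeze_and_drain(struct bio_counter *c)
	{
		pthread_mutex_lock(&c->lock);
		c->frozen = 1;
		while (c->in_flight > 0)
			pthread_cond_wait(&c->cond, &c->lock);
		pthread_mutex_unlock(&c->lock);
	}

	int main(void)
	{
		struct bio_counter c = { PTHREAD_MUTEX_INITIALIZER,
					 PTHREAD_COND_INITIALIZER, 0, 0 };

		bioc_inc_blocked(&c);	/* discard path enters */
		bioc_dec(&c);		/* discard path leaves */
		bioc_freeze_and_drain(&c);
		return 0;
	}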
@@ -2829,6 +2835,7 @@ int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2829 2835
2830struct async_delayed_refs { 2836struct async_delayed_refs {
2831 struct btrfs_root *root; 2837 struct btrfs_root *root;
2838 u64 transid;
2832 int count; 2839 int count;
2833 int error; 2840 int error;
2834 int sync; 2841 int sync;
@@ -2844,6 +2851,10 @@ static void delayed_ref_async_start(struct btrfs_work *work)
2844 2851
2845 async = container_of(work, struct async_delayed_refs, work); 2852 async = container_of(work, struct async_delayed_refs, work);
2846 2853
2854 /* If the commit has already started, we don't need to wait here */
2855 if (btrfs_transaction_blocked(async->root->fs_info))
2856 goto done;
2857
2847 trans = btrfs_join_transaction(async->root); 2858 trans = btrfs_join_transaction(async->root);
2848 if (IS_ERR(trans)) { 2859 if (IS_ERR(trans)) {
2849 async->error = PTR_ERR(trans); 2860 async->error = PTR_ERR(trans);
@@ -2855,10 +2866,15 @@ static void delayed_ref_async_start(struct btrfs_work *work)
2855 * wait on delayed refs 2866 * wait on delayed refs
2856 */ 2867 */
2857 trans->sync = true; 2868 trans->sync = true;
2869
2870 /* Don't bother flushing if we got into a different transaction */
2871 if (trans->transid > async->transid)
2872 goto end;
2873
2858 ret = btrfs_run_delayed_refs(trans, async->root, async->count); 2874 ret = btrfs_run_delayed_refs(trans, async->root, async->count);
2859 if (ret) 2875 if (ret)
2860 async->error = ret; 2876 async->error = ret;
2861 2877end:
2862 ret = btrfs_end_transaction(trans, async->root); 2878 ret = btrfs_end_transaction(trans, async->root);
2863 if (ret && !async->error) 2879 if (ret && !async->error)
2864 async->error = ret; 2880 async->error = ret;
@@ -2870,7 +2886,7 @@ done:
2870} 2886}
2871 2887
2872int btrfs_async_run_delayed_refs(struct btrfs_root *root, 2888int btrfs_async_run_delayed_refs(struct btrfs_root *root,
2873 unsigned long count, int wait) 2889 unsigned long count, u64 transid, int wait)
2874{ 2890{
2875 struct async_delayed_refs *async; 2891 struct async_delayed_refs *async;
2876 int ret; 2892 int ret;
@@ -2882,6 +2898,7 @@ int btrfs_async_run_delayed_refs(struct btrfs_root *root,
2882 async->root = root->fs_info->tree_root; 2898 async->root = root->fs_info->tree_root;
2883 async->count = count; 2899 async->count = count;
2884 async->error = 0; 2900 async->error = 0;
2901 async->transid = transid;
2885 if (wait) 2902 if (wait)
2886 async->sync = 1; 2903 async->sync = 1;
2887 else 2904 else
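
Recording async->transid at queue time lets the worker notice that it joined a later transaction than the one with the backlog, in which case flushing is pointless — the older transaction already committed. The staleness test is just a generation comparison; a compact sketch with illustrative names:

	#include <stdint.h>
	#include <stdio.h>

	struct async_work {
		uint64_t transid;	/* generation the work was queued for */
	};

	/* pretend this is the transaction the worker managed to join */
	static uint64_t join_transaction(void) { return 43; }

	static void worker(const struct async_work *w)
	{
		uint64_t cur = join_transaction();

		if (cur > w->transid) {
			/* old transaction committed; nothing left to flush */
			printf("skip stale work (queued %llu, now %llu)\n",
			       (unsigned long long)w->transid,
			       (unsigned long long)cur);
			return;
		}
		/* ... run delayed refs for this transaction ... */
	}

	int main(void)
	{
		struct async_work w = { .transid = 42 };
		worker(&w);
		return 0;
	}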
@@ -8010,8 +8027,9 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
8010 struct extent_buffer *buf; 8027 struct extent_buffer *buf;
8011 8028
8012 buf = btrfs_find_create_tree_block(root, bytenr); 8029 buf = btrfs_find_create_tree_block(root, bytenr);
8013 if (!buf) 8030 if (IS_ERR(buf))
8014 return ERR_PTR(-ENOMEM); 8031 return buf;
8032
8015 btrfs_set_header_generation(buf, trans->transid); 8033 btrfs_set_header_generation(buf, trans->transid);
8016 btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level); 8034 btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
8017 btrfs_tree_lock(buf); 8035 btrfs_tree_lock(buf);
@@ -8038,7 +8056,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
8038 set_extent_dirty(&trans->transaction->dirty_pages, buf->start, 8056 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
8039 buf->start + buf->len - 1, GFP_NOFS); 8057 buf->start + buf->len - 1, GFP_NOFS);
8040 } 8058 }
8041 trans->blocks_used++; 8059 trans->dirty = true;
8042 /* this returns a buffer locked for blocking */ 8060 /* this returns a buffer locked for blocking */
8043 return buf; 8061 return buf;
8044} 8062}
@@ -8653,8 +8671,9 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
8653 next = btrfs_find_tree_block(root->fs_info, bytenr); 8671 next = btrfs_find_tree_block(root->fs_info, bytenr);
8654 if (!next) { 8672 if (!next) {
8655 next = btrfs_find_create_tree_block(root, bytenr); 8673 next = btrfs_find_create_tree_block(root, bytenr);
8656 if (!next) 8674 if (IS_ERR(next))
8657 return -ENOMEM; 8675 return PTR_ERR(next);
8676
8658 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next, 8677 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
8659 level - 1); 8678 level - 1);
8660 reada = 1; 8679 reada = 1;
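
The switch from NULL returns to ERR_PTR() lets btrfs_find_create_tree_block() distinguish -ENOMEM from the new -EINVAL alignment failure in alloc_extent_buffer() below. The idiom encodes a small negative errno as an invalid pointer value; a userspace re-implementation for illustration:

	#include <stdio.h>
	#include <stdlib.h>

	#define MAX_ERRNO 4095

	static inline void *ERR_PTR(long err) { return (void *)err; }
	static inline long PTR_ERR(const void *p) { return (long)p; }
	static inline int IS_ERR(const void *p)
	{
		return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
	}

	static void *find_create_block(unsigned long start, unsigned long align)
	{
		if (start % align)	/* bad alignment: -EINVAL (22) */
			return ERR_PTR(-22);
		void *b = malloc(64);
		if (!b)			/* allocation failure: -ENOMEM (12) */
			return ERR_PTR(-12);
		return b;
	}

	int main(void)
	{
		void *b = find_create_block(4097, 4096);

		if (IS_ERR(b)) {	/* the caller pattern in the diff */
			printf("error %ld\n", PTR_ERR(b));
			return 0;
		}
		free(b);
		return 0;
	}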
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 3cd57825c75f..75533adef998 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2025,9 +2025,16 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
2025 bio->bi_iter.bi_size = 0; 2025 bio->bi_iter.bi_size = 0;
2026 map_length = length; 2026 map_length = length;
2027 2027
2028 /*
2029 * Avoid races with device replace and make sure our bbio has devices
2030 * associated with its stripes that don't go away while we are doing the
2031 * read repair operation.
2032 */
2033 btrfs_bio_counter_inc_blocked(fs_info);
2028 ret = btrfs_map_block(fs_info, WRITE, logical, 2034 ret = btrfs_map_block(fs_info, WRITE, logical,
2029 &map_length, &bbio, mirror_num); 2035 &map_length, &bbio, mirror_num);
2030 if (ret) { 2036 if (ret) {
2037 btrfs_bio_counter_dec(fs_info);
2031 bio_put(bio); 2038 bio_put(bio);
2032 return -EIO; 2039 return -EIO;
2033 } 2040 }
@@ -2037,6 +2044,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
2037 dev = bbio->stripes[mirror_num-1].dev; 2044 dev = bbio->stripes[mirror_num-1].dev;
2038 btrfs_put_bbio(bbio); 2045 btrfs_put_bbio(bbio);
2039 if (!dev || !dev->bdev || !dev->writeable) { 2046 if (!dev || !dev->bdev || !dev->writeable) {
2047 btrfs_bio_counter_dec(fs_info);
2040 bio_put(bio); 2048 bio_put(bio);
2041 return -EIO; 2049 return -EIO;
2042 } 2050 }
@@ -2045,6 +2053,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
2045 2053
2046 if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) { 2054 if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) {
2047 /* try to remap that extent elsewhere? */ 2055 /* try to remap that extent elsewhere? */
2056 btrfs_bio_counter_dec(fs_info);
2048 bio_put(bio); 2057 bio_put(bio);
2049 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS); 2058 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
2050 return -EIO; 2059 return -EIO;
@@ -2054,6 +2063,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
2054 "read error corrected: ino %llu off %llu (dev %s sector %llu)", 2063 "read error corrected: ino %llu off %llu (dev %s sector %llu)",
2055 btrfs_ino(inode), start, 2064 btrfs_ino(inode), start,
2056 rcu_str_deref(dev->name), sector); 2065 rcu_str_deref(dev->name), sector);
2066 btrfs_bio_counter_dec(fs_info);
2057 bio_put(bio); 2067 bio_put(bio);
2058 return 0; 2068 return 0;
2059} 2069}
@@ -4718,16 +4728,16 @@ err:
4718} 4728}
4719 4729
4720struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info, 4730struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
4721 u64 start) 4731 u64 start, u32 nodesize)
4722{ 4732{
4723 unsigned long len; 4733 unsigned long len;
4724 4734
4725 if (!fs_info) { 4735 if (!fs_info) {
4726 /* 4736 /*
4727 * Called only from tests that don't always have a fs_info 4737 * Called only from tests that don't always have a fs_info
4728 * available, but we know that nodesize is 4096 4738 * available
4729 */ 4739 */
4730 len = 4096; 4740 len = nodesize;
4731 } else { 4741 } else {
4732 len = fs_info->tree_root->nodesize; 4742 len = fs_info->tree_root->nodesize;
4733 } 4743 }
@@ -4823,7 +4833,7 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
4823 4833
4824#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 4834#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4825struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info, 4835struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
4826 u64 start) 4836 u64 start, u32 nodesize)
4827{ 4837{
4828 struct extent_buffer *eb, *exists = NULL; 4838 struct extent_buffer *eb, *exists = NULL;
4829 int ret; 4839 int ret;
@@ -4831,7 +4841,7 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
4831 eb = find_extent_buffer(fs_info, start); 4841 eb = find_extent_buffer(fs_info, start);
4832 if (eb) 4842 if (eb)
4833 return eb; 4843 return eb;
4834 eb = alloc_dummy_extent_buffer(fs_info, start); 4844 eb = alloc_dummy_extent_buffer(fs_info, start, nodesize);
4835 if (!eb) 4845 if (!eb)
4836 return NULL; 4846 return NULL;
4837 eb->fs_info = fs_info; 4847 eb->fs_info = fs_info;
@@ -4882,18 +4892,25 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
4882 int uptodate = 1; 4892 int uptodate = 1;
4883 int ret; 4893 int ret;
4884 4894
4895 if (!IS_ALIGNED(start, fs_info->tree_root->sectorsize)) {
4896 btrfs_err(fs_info, "bad tree block start %llu", start);
4897 return ERR_PTR(-EINVAL);
4898 }
4899
4885 eb = find_extent_buffer(fs_info, start); 4900 eb = find_extent_buffer(fs_info, start);
4886 if (eb) 4901 if (eb)
4887 return eb; 4902 return eb;
4888 4903
4889 eb = __alloc_extent_buffer(fs_info, start, len); 4904 eb = __alloc_extent_buffer(fs_info, start, len);
4890 if (!eb) 4905 if (!eb)
4891 return NULL; 4906 return ERR_PTR(-ENOMEM);
4892 4907
4893 for (i = 0; i < num_pages; i++, index++) { 4908 for (i = 0; i < num_pages; i++, index++) {
4894 p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL); 4909 p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
4895 if (!p) 4910 if (!p) {
4911 exists = ERR_PTR(-ENOMEM);
4896 goto free_eb; 4912 goto free_eb;
4913 }
4897 4914
4898 spin_lock(&mapping->private_lock); 4915 spin_lock(&mapping->private_lock);
4899 if (PagePrivate(p)) { 4916 if (PagePrivate(p)) {
@@ -4938,8 +4955,10 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
4938 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); 4955 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4939again: 4956again:
4940 ret = radix_tree_preload(GFP_NOFS); 4957 ret = radix_tree_preload(GFP_NOFS);
4941 if (ret) 4958 if (ret) {
4959 exists = ERR_PTR(ret);
4942 goto free_eb; 4960 goto free_eb;
4961 }
4943 4962
4944 spin_lock(&fs_info->buffer_lock); 4963 spin_lock(&fs_info->buffer_lock);
4945 ret = radix_tree_insert(&fs_info->buffer_radix, 4964 ret = radix_tree_insert(&fs_info->buffer_radix,
@@ -5323,6 +5342,11 @@ int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dstv,
5323 return ret; 5342 return ret;
5324} 5343}
5325 5344
5345/*
5346 * return 0 if the item is found within a page.
5347 * return 1 if the item spans two pages.
5348 * return -EINVAL otherwise.
5349 */
5326int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start, 5350int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
5327 unsigned long min_len, char **map, 5351 unsigned long min_len, char **map,
5328 unsigned long *map_start, 5352 unsigned long *map_start,
@@ -5337,7 +5361,7 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
5337 PAGE_SHIFT; 5361 PAGE_SHIFT;
5338 5362
5339 if (i != end_i) 5363 if (i != end_i)
5340 return -EINVAL; 5364 return 1;
5341 5365
5342 if (i == 0) { 5366 if (i == 0) {
5343 offset = start_offset; 5367 offset = start_offset;
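
Per the new comment, map_private_extent_buffer() now has three outcomes: 0 (the item maps inside one page and *map is valid), 1 (the item straddles a page boundary, so the caller must fall back to a copying accessor), and negative on error. A sketch of the expected caller shape, with hypothetical helpers rather than btrfs functions:

	#include <stdio.h>
	#include <string.h>

	/* pretend mapper: 0 = direct map, 1 = spans pages, <0 = error */
	static int map_item(unsigned long start, unsigned long len,
			    unsigned long page_size, char **map)
	{
		static char page[4096];

		if (len > page_size)
			return -22;
		if (start / page_size != (start + len - 1) / page_size)
			return 1;	/* straddles two pages */
		*map = page + (start % page_size);
		return 0;
	}

	static void read_item(unsigned long start, unsigned long len, char *dst)
	{
		char *map;
		int ret = map_item(start, len, 4096, &map);

		if (ret == 0) {
			memcpy(dst, map, len);	/* fast path: one page */
		} else if (ret == 1) {
			/* slow path: copy across the page break */
			for (unsigned long i = 0; i < len; i++)
				dst[i] = 0;	/* stand-in for per-page reads */
		} else {
			fprintf(stderr, "map_item failed: %d\n", ret);
		}
	}

	int main(void)
	{
		char buf[16];
		read_item(4090, 12, buf);	/* exercises the ret == 1 path */
		return 0;
	}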
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 1baf19c9b79d..c0c1c4fef6ce 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -348,7 +348,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
348struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info, 348struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
349 u64 start, unsigned long len); 349 u64 start, unsigned long len);
350struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info, 350struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
351 u64 start); 351 u64 start, u32 nodesize);
352struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src); 352struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
353struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info, 353struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
354 u64 start); 354 u64 start);
@@ -468,5 +468,5 @@ noinline u64 find_lock_delalloc_range(struct inode *inode,
468 u64 *end, u64 max_bytes); 468 u64 *end, u64 max_bytes);
469#endif 469#endif
470struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info, 470struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
471 u64 start); 471 u64 start, u32 nodesize);
472#endif 472#endif
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index e0c9bd3fb02d..2234e88cf674 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1534,30 +1534,30 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
1534 reserve_bytes = round_up(write_bytes + sector_offset, 1534 reserve_bytes = round_up(write_bytes + sector_offset,
1535 root->sectorsize); 1535 root->sectorsize);
1536 1536
1537 if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
1538 BTRFS_INODE_PREALLOC)) &&
1539 check_can_nocow(inode, pos, &write_bytes) > 0) {
1540 /*
1541 * For nodata cow case, no need to reserve
1542 * data space.
1543 */
1544 only_release_metadata = true;
1545 /*
1546 * our prealloc extent may be smaller than
1547 * write_bytes, so scale down.
1548 */
1549 num_pages = DIV_ROUND_UP(write_bytes + offset,
1550 PAGE_SIZE);
1551 reserve_bytes = round_up(write_bytes + sector_offset,
1552 root->sectorsize);
1553 goto reserve_metadata;
1554 }
1555
1556 ret = btrfs_check_data_free_space(inode, pos, write_bytes); 1537 ret = btrfs_check_data_free_space(inode, pos, write_bytes);
1557 if (ret < 0) 1538 if (ret < 0) {
1558 break; 1539 if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
1540 BTRFS_INODE_PREALLOC)) &&
1541 check_can_nocow(inode, pos, &write_bytes) > 0) {
1542 /*
1543 * For nodata cow case, no need to reserve
1544 * data space.
1545 */
1546 only_release_metadata = true;
1547 /*
1548 * our prealloc extent may be smaller than
1549 * write_bytes, so scale down.
1550 */
1551 num_pages = DIV_ROUND_UP(write_bytes + offset,
1552 PAGE_SIZE);
1553 reserve_bytes = round_up(write_bytes +
1554 sector_offset,
1555 root->sectorsize);
1556 } else {
1557 break;
1558 }
1559 }
1559 1560
1560reserve_metadata:
1561 ret = btrfs_delalloc_reserve_metadata(inode, reserve_bytes); 1561 ret = btrfs_delalloc_reserve_metadata(inode, reserve_bytes);
1562 if (ret) { 1562 if (ret) {
1563 if (!only_release_metadata) 1563 if (!only_release_metadata)
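
The restructured write loop now tries the plain data reservation first and probes for a NOCOW/prealloc window only when that reservation fails, instead of running the nocow check up front on every iteration. The control flow reduces to this sketch (illustrative stand-ins, not the btrfs functions):

	#include <stdbool.h>
	#include <stdio.h>

	static int reserve_data_space(long bytes) { return -28; /* ENOSPC */ }
	static bool can_nocow(long *bytes)        { *bytes /= 2; return true; }
	static int reserve_metadata(long bytes)   { return 0; }

	static int reserve_for_write(long write_bytes, bool *metadata_only)
	{
		*metadata_only = false;

		if (reserve_data_space(write_bytes) < 0) {
			long nocow_bytes = write_bytes;

			/* only now check for a covering prealloc/nocow extent */
			if (!can_nocow(&nocow_bytes))
				return -28;		/* give up: ENOSPC */
			write_bytes = nocow_bytes;	/* may shrink the write */
			*metadata_only = true;		/* no data reservation */
		}
		return reserve_metadata(write_bytes);
	}

	int main(void)
	{
		bool metadata_only;
		int ret = reserve_for_write(1 << 20, &metadata_only);

		printf("ret=%d metadata_only=%d\n", ret, metadata_only);
		return 0;
	}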
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index c6dc1183f542..69d270f6602c 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -29,7 +29,7 @@
29#include "inode-map.h" 29#include "inode-map.h"
30#include "volumes.h" 30#include "volumes.h"
31 31
32#define BITS_PER_BITMAP (PAGE_SIZE * 8) 32#define BITS_PER_BITMAP (PAGE_SIZE * 8UL)
33#define MAX_CACHE_BYTES_PER_GIG SZ_32K 33#define MAX_CACHE_BYTES_PER_GIG SZ_32K
34 34
35struct btrfs_trim_range { 35struct btrfs_trim_range {
@@ -1415,11 +1415,11 @@ static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
1415 u64 offset) 1415 u64 offset)
1416{ 1416{
1417 u64 bitmap_start; 1417 u64 bitmap_start;
1418 u32 bytes_per_bitmap; 1418 u64 bytes_per_bitmap;
1419 1419
1420 bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit; 1420 bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
1421 bitmap_start = offset - ctl->start; 1421 bitmap_start = offset - ctl->start;
1422 bitmap_start = div_u64(bitmap_start, bytes_per_bitmap); 1422 bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
1423 bitmap_start *= bytes_per_bitmap; 1423 bitmap_start *= bytes_per_bitmap;
1424 bitmap_start += ctl->start; 1424 bitmap_start += ctl->start;
1425 1425
@@ -1638,10 +1638,10 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
1638 u64 bitmap_bytes; 1638 u64 bitmap_bytes;
1639 u64 extent_bytes; 1639 u64 extent_bytes;
1640 u64 size = block_group->key.offset; 1640 u64 size = block_group->key.offset;
1641 u32 bytes_per_bg = BITS_PER_BITMAP * ctl->unit; 1641 u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
1642 u32 max_bitmaps = div_u64(size + bytes_per_bg - 1, bytes_per_bg); 1642 u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
1643 1643
1644 max_bitmaps = max_t(u32, max_bitmaps, 1); 1644 max_bitmaps = max_t(u64, max_bitmaps, 1);
1645 1645
1646 ASSERT(ctl->total_bitmaps <= max_bitmaps); 1646 ASSERT(ctl->total_bitmaps <= max_bitmaps);
1647 1647
@@ -1660,7 +1660,7 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
1660 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as 1660 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
1661 * we add more bitmaps. 1661 * we add more bitmaps.
1662 */ 1662 */
1663 bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_SIZE; 1663 bitmap_bytes = (ctl->total_bitmaps + 1) * ctl->unit;
1664 1664
1665 if (bitmap_bytes >= max_bytes) { 1665 if (bitmap_bytes >= max_bytes) {
1666 ctl->extents_thresh = 0; 1666 ctl->extents_thresh = 0;
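
The 8UL and div64_u64 changes matter on 64K-page systems: BITS_PER_BITMAP becomes 65536 * 8 = 524288 bits, and multiplied by a 64K bitmap unit the product is 2^35, which both wraps in 32-bit arithmetic and no longer fits the u32 divisor that div_u64() expects. A two-line demonstration of the wraparound:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t page_size = 65536, unit = 65536;	/* ppc64-style */

		uint32_t bits32 = page_size * 8;	/* 524288, still fits */
		uint32_t bad    = bits32 * unit;	/* 2^35 wraps to 0 */
		uint64_t good   = (uint64_t)page_size * 8 * unit;

		printf("32-bit: %u, 64-bit: %llu\n",
		       bad, (unsigned long long)good);
		return 0;
	}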
@@ -3662,7 +3662,7 @@ have_info:
3662 if (tmp->offset + tmp->bytes < offset) 3662 if (tmp->offset + tmp->bytes < offset)
3663 break; 3663 break;
3664 if (offset + bytes < tmp->offset) { 3664 if (offset + bytes < tmp->offset) {
3665 n = rb_prev(&info->offset_index); 3665 n = rb_prev(&tmp->offset_index);
3666 continue; 3666 continue;
3667 } 3667 }
3668 info = tmp; 3668 info = tmp;
@@ -3676,7 +3676,7 @@ have_info:
3676 if (offset + bytes < tmp->offset) 3676 if (offset + bytes < tmp->offset)
3677 break; 3677 break;
3678 if (tmp->offset + tmp->bytes < offset) { 3678 if (tmp->offset + tmp->bytes < offset) {
3679 n = rb_next(&info->offset_index); 3679 n = rb_next(&tmp->offset_index);
3680 continue; 3680 continue;
3681 } 3681 }
3682 info = tmp; 3682 info = tmp;
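
The two hunks above fix a subtle iteration bug: the loop examined tmp but kept stepping from info, so rb_prev()/rb_next() returned the same neighbour forever and the search could spin. The rule — always advance from the node you just examined — shown in a minimal list walk (plain C, a linked list standing in for the rbtree):

	#include <stdio.h>

	struct node { int key; struct node *next; };

	static struct node *search_from(struct node *info, int wanted)
	{
		struct node *n = info->next;

		while (n) {
			struct node *tmp = n;

			if (tmp->key == wanted)
				return tmp;
			/* correct: step from tmp, the node just examined */
			n = tmp->next;
			/* buggy version: n = info->next;  -- loops forever */
		}
		return NULL;
	}

	int main(void)
	{
		struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

		printf("found: %d\n", search_from(&a, 3) ? 3 : -1);
		return 0;
	}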
diff --git a/fs/btrfs/hash.c b/fs/btrfs/hash.c
index aae520b2aee5..a97fdc156a03 100644
--- a/fs/btrfs/hash.c
+++ b/fs/btrfs/hash.c
@@ -24,6 +24,11 @@ int __init btrfs_hash_init(void)
24 return PTR_ERR_OR_ZERO(tfm); 24 return PTR_ERR_OR_ZERO(tfm);
25} 25}
26 26
27const char* btrfs_crc32c_impl(void)
28{
29 return crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm));
30}
31
27void btrfs_hash_exit(void) 32void btrfs_hash_exit(void)
28{ 33{
29 crypto_free_shash(tfm); 34 crypto_free_shash(tfm);
diff --git a/fs/btrfs/hash.h b/fs/btrfs/hash.h
index 118a2316e5d3..c3a2ec554361 100644
--- a/fs/btrfs/hash.h
+++ b/fs/btrfs/hash.h
@@ -22,6 +22,7 @@
22int __init btrfs_hash_init(void); 22int __init btrfs_hash_init(void);
23 23
24void btrfs_hash_exit(void); 24void btrfs_hash_exit(void);
25const char* btrfs_crc32c_impl(void);
25 26
26u32 btrfs_crc32c(u32 crc, const void *address, unsigned int length); 27u32 btrfs_crc32c(u32 crc, const void *address, unsigned int length);
27 28
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 270499598ed4..4421954720b8 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3271,7 +3271,16 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
3271 /* grab metadata reservation from transaction handle */ 3271 /* grab metadata reservation from transaction handle */
3272 if (reserve) { 3272 if (reserve) {
3273 ret = btrfs_orphan_reserve_metadata(trans, inode); 3273 ret = btrfs_orphan_reserve_metadata(trans, inode);
3274 BUG_ON(ret); /* -ENOSPC in reservation; Logic error? JDM */ 3274 ASSERT(!ret);
3275 if (ret) {
3276 atomic_dec(&root->orphan_inodes);
3277 clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3278 &BTRFS_I(inode)->runtime_flags);
3279 if (insert)
3280 clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3281 &BTRFS_I(inode)->runtime_flags);
3282 return ret;
3283 }
3275 } 3284 }
3276 3285
3277 /* insert an orphan item to track this unlinked/truncated file */ 3286 /* insert an orphan item to track this unlinked/truncated file */
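
Replacing the BUG_ON with an unwind path means a failed metadata reservation now rolls back exactly the state taken earlier in btrfs_orphan_add(): the inode count and the flag bits, in reverse order of acquisition. The shape of that cleanup, sketched standalone (flag names are stand-ins):

	#include <stdio.h>

	enum { META_RESERVED = 1 << 0, HAS_ORPHAN_ITEM = 1 << 1 };

	struct inode_state { int orphan_inodes; unsigned flags; };

	static int reserve_metadata(void) { return -28; /* simulate ENOSPC */ }

	static int orphan_add(struct inode_state *s, int insert)
	{
		/* state taken before the reservation attempt */
		s->orphan_inodes++;
		s->flags |= META_RESERVED;
		if (insert)
			s->flags |= HAS_ORPHAN_ITEM;

		int ret = reserve_metadata();
		if (ret) {
			/* undo everything instead of BUG_ON(ret) */
			s->orphan_inodes--;
			s->flags &= ~META_RESERVED;
			if (insert)
				s->flags &= ~HAS_ORPHAN_ITEM;
			return ret;
		}
		return 0;
	}

	int main(void)
	{
		struct inode_state s = { 0, 0 };

		printf("ret=%d inodes=%d flags=%#x\n",
		       orphan_add(&s, 1), s.orphan_inodes, s.flags);
		return 0;
	}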
@@ -4549,6 +4558,7 @@ delete:
4549 BUG_ON(ret); 4558 BUG_ON(ret);
4550 if (btrfs_should_throttle_delayed_refs(trans, root)) 4559 if (btrfs_should_throttle_delayed_refs(trans, root))
4551 btrfs_async_run_delayed_refs(root, 4560 btrfs_async_run_delayed_refs(root,
4561 trans->transid,
4552 trans->delayed_ref_updates * 2, 0); 4562 trans->delayed_ref_updates * 2, 0);
4553 if (be_nice) { 4563 if (be_nice) {
4554 if (truncate_space_check(trans, root, 4564 if (truncate_space_check(trans, root,
@@ -5748,6 +5758,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
5748 int name_len; 5758 int name_len;
5749 int is_curr = 0; /* ctx->pos points to the current index? */ 5759 int is_curr = 0; /* ctx->pos points to the current index? */
5750 bool emitted; 5760 bool emitted;
5761 bool put = false;
5751 5762
5752 /* FIXME, use a real flag for deciding about the key type */ 5763 /* FIXME, use a real flag for deciding about the key type */
5753 if (root->fs_info->tree_root == root) 5764 if (root->fs_info->tree_root == root)
@@ -5765,7 +5776,8 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
5765 if (key_type == BTRFS_DIR_INDEX_KEY) { 5776 if (key_type == BTRFS_DIR_INDEX_KEY) {
5766 INIT_LIST_HEAD(&ins_list); 5777 INIT_LIST_HEAD(&ins_list);
5767 INIT_LIST_HEAD(&del_list); 5778 INIT_LIST_HEAD(&del_list);
5768 btrfs_get_delayed_items(inode, &ins_list, &del_list); 5779 put = btrfs_readdir_get_delayed_items(inode, &ins_list,
5780 &del_list);
5769 } 5781 }
5770 5782
5771 key.type = key_type; 5783 key.type = key_type;
@@ -5912,8 +5924,8 @@ next:
5912nopos: 5924nopos:
5913 ret = 0; 5925 ret = 0;
5914err: 5926err:
5915 if (key_type == BTRFS_DIR_INDEX_KEY) 5927 if (put)
5916 btrfs_put_delayed_items(&ins_list, &del_list); 5928 btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
5917 btrfs_free_path(path); 5929 btrfs_free_path(path);
5918 return ret; 5930 return ret;
5919} 5931}
@@ -6979,7 +6991,18 @@ insert:
6979 * existing will always be non-NULL, since there must be 6991 * existing will always be non-NULL, since there must be
6980 * an extent causing the -EEXIST. 6992 * an extent causing the -EEXIST.
6981 */ 6993 */
6982 if (start >= extent_map_end(existing) || 6994 if (existing->start == em->start &&
6995 extent_map_end(existing) == extent_map_end(em) &&
6996 em->block_start == existing->block_start) {
6997 /*
6998 * these two extents are the same; this happens
6999 * especially with inline extents
7000 */
7001 free_extent_map(em);
7002 em = existing;
7003 err = 0;
7004
7005 } else if (start >= extent_map_end(existing) ||
6983 start <= existing->start) { 7006 start <= existing->start) {
6984 /* 7007 /*
6985 * The existing extent map is the one nearest to 7008 * The existing extent map is the one nearest to
@@ -10514,7 +10537,7 @@ static const struct inode_operations btrfs_dir_ro_inode_operations = {
10514static const struct file_operations btrfs_dir_file_operations = { 10537static const struct file_operations btrfs_dir_file_operations = {
10515 .llseek = generic_file_llseek, 10538 .llseek = generic_file_llseek,
10516 .read = generic_read_dir, 10539 .read = generic_read_dir,
10517 .iterate = btrfs_real_readdir, 10540 .iterate_shared = btrfs_real_readdir,
10518 .unlocked_ioctl = btrfs_ioctl, 10541 .unlocked_ioctl = btrfs_ioctl,
10519#ifdef CONFIG_COMPAT 10542#ifdef CONFIG_COMPAT
10520 .compat_ioctl = btrfs_compat_ioctl, 10543 .compat_ioctl = btrfs_compat_ioctl,
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 559170464d7c..aca8264f4a49 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -718,12 +718,13 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr,
718 return count; 718 return count;
719} 719}
720 720
721void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr, 721int btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr,
722 const u64 range_start, const u64 range_len) 722 const u64 range_start, const u64 range_len)
723{ 723{
724 struct btrfs_root *root; 724 struct btrfs_root *root;
725 struct list_head splice; 725 struct list_head splice;
726 int done; 726 int done;
727 int total_done = 0;
727 728
728 INIT_LIST_HEAD(&splice); 729 INIT_LIST_HEAD(&splice);
729 730
@@ -742,6 +743,7 @@ void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr,
742 done = btrfs_wait_ordered_extents(root, nr, 743 done = btrfs_wait_ordered_extents(root, nr,
743 range_start, range_len); 744 range_start, range_len);
744 btrfs_put_fs_root(root); 745 btrfs_put_fs_root(root);
746 total_done += done;
745 747
746 spin_lock(&fs_info->ordered_root_lock); 748 spin_lock(&fs_info->ordered_root_lock);
747 if (nr != -1) { 749 if (nr != -1) {
@@ -752,6 +754,8 @@ void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr,
752 list_splice_tail(&splice, &fs_info->ordered_roots); 754 list_splice_tail(&splice, &fs_info->ordered_roots);
753 spin_unlock(&fs_info->ordered_root_lock); 755 spin_unlock(&fs_info->ordered_root_lock);
754 mutex_unlock(&fs_info->ordered_operations_mutex); 756 mutex_unlock(&fs_info->ordered_operations_mutex);
757
758 return total_done;
755} 759}
756 760
757/* 761/*
@@ -964,6 +968,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
964 struct rb_node *prev = NULL; 968 struct rb_node *prev = NULL;
965 struct btrfs_ordered_extent *test; 969 struct btrfs_ordered_extent *test;
966 int ret = 1; 970 int ret = 1;
971 u64 orig_offset = offset;
967 972
968 spin_lock_irq(&tree->lock); 973 spin_lock_irq(&tree->lock);
969 if (ordered) { 974 if (ordered) {
@@ -979,7 +984,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
979 984
980 /* truncate file */ 985 /* truncate file */
981 if (disk_i_size > i_size) { 986 if (disk_i_size > i_size) {
982 BTRFS_I(inode)->disk_i_size = i_size; 987 BTRFS_I(inode)->disk_i_size = orig_offset;
983 ret = 0; 988 ret = 0;
984 goto out; 989 goto out;
985 } 990 }
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 2049c9be85ee..451507776ff5 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -199,7 +199,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
199 u32 *sum, int len); 199 u32 *sum, int len);
200int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr, 200int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr,
201 const u64 range_start, const u64 range_len); 201 const u64 range_start, const u64 range_len);
202void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr, 202int btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr,
203 const u64 range_start, const u64 range_len); 203 const u64 range_start, const u64 range_len);
204void btrfs_get_logged_extents(struct inode *inode, 204void btrfs_get_logged_extents(struct inode *inode,
205 struct list_head *logged_list, 205 struct list_head *logged_list,
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index 298631eaee78..8428db7cd88f 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -761,12 +761,14 @@ static void __reada_start_machine(struct btrfs_fs_info *fs_info)
761 761
762 do { 762 do {
763 enqueued = 0; 763 enqueued = 0;
764 mutex_lock(&fs_devices->device_list_mutex);
764 list_for_each_entry(device, &fs_devices->devices, dev_list) { 765 list_for_each_entry(device, &fs_devices->devices, dev_list) {
765 if (atomic_read(&device->reada_in_flight) < 766 if (atomic_read(&device->reada_in_flight) <
766 MAX_IN_FLIGHT) 767 MAX_IN_FLIGHT)
767 enqueued += reada_start_machine_dev(fs_info, 768 enqueued += reada_start_machine_dev(fs_info,
768 device); 769 device);
769 } 770 }
771 mutex_unlock(&fs_devices->device_list_mutex);
770 total += enqueued; 772 total += enqueued;
771 } while (enqueued && total < 10000); 773 } while (enqueued && total < 10000);
772 774
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 46d847f66e4b..70427ef66b04 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -3582,6 +3582,46 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
3582 */ 3582 */
3583 scrub_pause_on(fs_info); 3583 scrub_pause_on(fs_info);
3584 ret = btrfs_inc_block_group_ro(root, cache); 3584 ret = btrfs_inc_block_group_ro(root, cache);
3585 if (!ret && is_dev_replace) {
3586 /*
3587 * If we are doing a device replace, wait for any tasks
3588 * that started delalloc right before we set the block
3589 * group to RO mode, as they might have just allocated
3590 * an extent from it or decided they could do a nocow
3591 * write. And if any such tasks did that, wait for their
3592 * ordered extents to complete and then commit the
3593 * current transaction, so that we can later see the new
3594 * extent items in the extent tree - the ordered extents
3595 * create delayed data references (for cow writes) when
3596 * they complete; those references are run, inserting the
3597 * corresponding extent items into the extent tree, when
3598 * we commit the transaction they joined in
3599 * inode.c:btrfs_finish_ordered_io(). We later use
3600 * the commit root of the extent tree to find extents
3601 * to copy from the srcdev into the tgtdev, and we don't
3602 * want to miss any new extents.
3603 */
3604 btrfs_wait_block_group_reservations(cache);
3605 btrfs_wait_nocow_writers(cache);
3606 ret = btrfs_wait_ordered_roots(fs_info, -1,
3607 cache->key.objectid,
3608 cache->key.offset);
3609 if (ret > 0) {
3610 struct btrfs_trans_handle *trans;
3611
3612 trans = btrfs_join_transaction(root);
3613 if (IS_ERR(trans))
3614 ret = PTR_ERR(trans);
3615 else
3616 ret = btrfs_commit_transaction(trans,
3617 root);
3618 if (ret) {
3619 scrub_pause_off(fs_info);
3620 btrfs_put_block_group(cache);
3621 break;
3622 }
3623 }
3624 }
3585 scrub_pause_off(fs_info); 3625 scrub_pause_off(fs_info);
3586 3626
3587 if (ret == 0) { 3627 if (ret == 0) {
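
Together with btrfs_wait_ordered_roots() now returning how many extents it waited on, the replace path gets a cheap "did any writes land?" signal: wait out reservations and nocow writers that raced the RO flip, wait for ordered extents in the block group's range, and commit only if that wait actually completed work, so the extent tree's commit root is current before copying. In outline, with illustrative helpers:

	#include <stdio.h>

	static void wait_reservations(void)  { }
	static void wait_nocow_writers(void) { }
	static int  wait_ordered_range(void) { return 2; /* extents waited */ }
	static int  commit_transaction(void) { return 0; }

	static int prepare_block_group_for_copy(void)
	{
		int ret;

		wait_reservations();	/* tasks that reserved before RO flip */
		wait_nocow_writers();	/* tasks that chose nocow before it */

		ret = wait_ordered_range();	/* >0: ordered extents done */
		if (ret > 0)
			ret = commit_transaction(); /* expose in commit root */
		return ret < 0 ? ret : 0;
	}

	int main(void)
	{
		printf("prepare: %d\n", prepare_block_group_for_copy());
		return 0;
	}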
@@ -3602,9 +3642,11 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
3602 break; 3642 break;
3603 } 3643 }
3604 3644
3645 btrfs_dev_replace_lock(&fs_info->dev_replace, 1);
3605 dev_replace->cursor_right = found_key.offset + length; 3646 dev_replace->cursor_right = found_key.offset + length;
3606 dev_replace->cursor_left = found_key.offset; 3647 dev_replace->cursor_left = found_key.offset;
3607 dev_replace->item_needs_writeback = 1; 3648 dev_replace->item_needs_writeback = 1;
3649 btrfs_dev_replace_unlock(&fs_info->dev_replace, 1);
3608 ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length, 3650 ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
3609 found_key.offset, cache, is_dev_replace); 3651 found_key.offset, cache, is_dev_replace);
3610 3652
@@ -3640,6 +3682,11 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
3640 3682
3641 scrub_pause_off(fs_info); 3683 scrub_pause_off(fs_info);
3642 3684
3685 btrfs_dev_replace_lock(&fs_info->dev_replace, 1);
3686 dev_replace->cursor_left = dev_replace->cursor_right;
3687 dev_replace->item_needs_writeback = 1;
3688 btrfs_dev_replace_unlock(&fs_info->dev_replace, 1);
3689
3643 if (ro_set) 3690 if (ro_set)
3644 btrfs_dec_block_group_ro(root, cache); 3691 btrfs_dec_block_group_ro(root, cache);
3645 3692
@@ -3677,9 +3724,6 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
3677 ret = -ENOMEM; 3724 ret = -ENOMEM;
3678 break; 3725 break;
3679 } 3726 }
3680
3681 dev_replace->cursor_left = dev_replace->cursor_right;
3682 dev_replace->item_needs_writeback = 1;
3683skip: 3727skip:
3684 key.offset = found_key.offset + length; 3728 key.offset = found_key.offset + length;
3685 btrfs_release_path(path); 3729 btrfs_release_path(path);
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 4e59a91a11e0..60e7179ed4b7 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -235,7 +235,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
235 trans->aborted = errno; 235 trans->aborted = errno;
236 /* Nothing used. The other threads that have joined this 236 /* Nothing used. The other threads that have joined this
237 * transaction may be able to continue. */ 237 * transaction may be able to continue. */
238 if (!trans->blocks_used && list_empty(&trans->new_bgs)) { 238 if (!trans->dirty && list_empty(&trans->new_bgs)) {
239 const char *errstr; 239 const char *errstr;
240 240
241 errstr = btrfs_decode_error(errno); 241 errstr = btrfs_decode_error(errno);
@@ -1807,6 +1807,8 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
1807 } 1807 }
1808 } 1808 }
1809 sb->s_flags &= ~MS_RDONLY; 1809 sb->s_flags &= ~MS_RDONLY;
1810
1811 fs_info->open = 1;
1810 } 1812 }
1811out: 1813out:
1812 wake_up_process(fs_info->transaction_kthread); 1814 wake_up_process(fs_info->transaction_kthread);
@@ -2303,7 +2305,7 @@ static void btrfs_interface_exit(void)
2303 2305
2304static void btrfs_print_mod_info(void) 2306static void btrfs_print_mod_info(void)
2305{ 2307{
2306 printk(KERN_INFO "Btrfs loaded" 2308 printk(KERN_INFO "Btrfs loaded, crc32c=%s"
2307#ifdef CONFIG_BTRFS_DEBUG 2309#ifdef CONFIG_BTRFS_DEBUG
2308 ", debug=on" 2310 ", debug=on"
2309#endif 2311#endif
@@ -2313,33 +2315,48 @@ static void btrfs_print_mod_info(void)
2313#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY 2315#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2314 ", integrity-checker=on" 2316 ", integrity-checker=on"
2315#endif 2317#endif
2316 "\n"); 2318 "\n",
2319 btrfs_crc32c_impl());
2317} 2320}
2318 2321
2319static int btrfs_run_sanity_tests(void) 2322static int btrfs_run_sanity_tests(void)
2320{ 2323{
2321 int ret; 2324 int ret, i;
2322 2325 u32 sectorsize, nodesize;
2326 u32 test_sectorsize[] = {
2327 PAGE_SIZE,
2328 };
2323 ret = btrfs_init_test_fs(); 2329 ret = btrfs_init_test_fs();
2324 if (ret) 2330 if (ret)
2325 return ret; 2331 return ret;
2326 2332 for (i = 0; i < ARRAY_SIZE(test_sectorsize); i++) {
2327 ret = btrfs_test_free_space_cache(); 2333 sectorsize = test_sectorsize[i];
2328 if (ret) 2334 for (nodesize = sectorsize;
2329 goto out; 2335 nodesize <= BTRFS_MAX_METADATA_BLOCKSIZE;
2330 ret = btrfs_test_extent_buffer_operations(); 2336 nodesize <<= 1) {
2331 if (ret) 2337 pr_info("BTRFS: selftest: sectorsize: %u nodesize: %u\n",
2332 goto out; 2338 sectorsize, nodesize);
2333 ret = btrfs_test_extent_io(); 2339 ret = btrfs_test_free_space_cache(sectorsize, nodesize);
2334 if (ret) 2340 if (ret)
2335 goto out; 2341 goto out;
2336 ret = btrfs_test_inodes(); 2342 ret = btrfs_test_extent_buffer_operations(sectorsize,
2337 if (ret) 2343 nodesize);
2338 goto out; 2344 if (ret)
2339 ret = btrfs_test_qgroups(); 2345 goto out;
2340 if (ret) 2346 ret = btrfs_test_extent_io(sectorsize, nodesize);
2341 goto out; 2347 if (ret)
2342 ret = btrfs_test_free_space_tree(); 2348 goto out;
2349 ret = btrfs_test_inodes(sectorsize, nodesize);
2350 if (ret)
2351 goto out;
2352 ret = btrfs_test_qgroups(sectorsize, nodesize);
2353 if (ret)
2354 goto out;
2355 ret = btrfs_test_free_space_tree(sectorsize, nodesize);
2356 if (ret)
2357 goto out;
2358 }
2359 }
2343out: 2360out:
2344 btrfs_destroy_test_fs(); 2361 btrfs_destroy_test_fs();
2345 return ret; 2362 return ret;
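
The reworked test runner is a two-level sweep: each candidate sectorsize is paired with every power-of-two nodesize from sectorsize up to the metadata blocksize cap. The loop structure in isolation (the constants are the common x86 values, for illustration):

	#include <stdio.h>

	#define MAX_METADATA_BLOCKSIZE 65536	/* BTRFS_MAX_METADATA_BLOCKSIZE */

	int main(void)
	{
		unsigned int test_sectorsize[] = { 4096 };	/* x86 PAGE_SIZE */
		unsigned int sectorsize, nodesize;
		size_t i, n = sizeof(test_sectorsize) / sizeof(*test_sectorsize);

		for (i = 0; i < n; i++) {
			sectorsize = test_sectorsize[i];
			for (nodesize = sectorsize;
			     nodesize <= MAX_METADATA_BLOCKSIZE;
			     nodesize <<= 1)
				printf("selftest: sectorsize: %u nodesize: %u\n",
				       sectorsize, nodesize);
		}
		return 0;
	}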
diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
index f54bf450bad3..02223f3f78f4 100644
--- a/fs/btrfs/tests/btrfs-tests.c
+++ b/fs/btrfs/tests/btrfs-tests.c
@@ -68,7 +68,7 @@ int btrfs_init_test_fs(void)
68 if (IS_ERR(test_mnt)) { 68 if (IS_ERR(test_mnt)) {
69 printk(KERN_ERR "btrfs: cannot mount test file system\n"); 69 printk(KERN_ERR "btrfs: cannot mount test file system\n");
70 unregister_filesystem(&test_type); 70 unregister_filesystem(&test_type);
71 return ret; 71 return PTR_ERR(test_mnt);
72 } 72 }
73 return 0; 73 return 0;
74} 74}
@@ -175,7 +175,7 @@ void btrfs_free_dummy_root(struct btrfs_root *root)
175} 175}
176 176
177struct btrfs_block_group_cache * 177struct btrfs_block_group_cache *
178btrfs_alloc_dummy_block_group(unsigned long length) 178btrfs_alloc_dummy_block_group(unsigned long length, u32 sectorsize)
179{ 179{
180 struct btrfs_block_group_cache *cache; 180 struct btrfs_block_group_cache *cache;
181 181
@@ -192,8 +192,8 @@ btrfs_alloc_dummy_block_group(unsigned long length)
192 cache->key.objectid = 0; 192 cache->key.objectid = 0;
193 cache->key.offset = length; 193 cache->key.offset = length;
194 cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 194 cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
195 cache->sectorsize = 4096; 195 cache->sectorsize = sectorsize;
196 cache->full_stripe_len = 4096; 196 cache->full_stripe_len = sectorsize;
197 197
198 INIT_LIST_HEAD(&cache->list); 198 INIT_LIST_HEAD(&cache->list);
199 INIT_LIST_HEAD(&cache->cluster_list); 199 INIT_LIST_HEAD(&cache->cluster_list);
diff --git a/fs/btrfs/tests/btrfs-tests.h b/fs/btrfs/tests/btrfs-tests.h
index 054b8c73c951..66fb6b701eb7 100644
--- a/fs/btrfs/tests/btrfs-tests.h
+++ b/fs/btrfs/tests/btrfs-tests.h
@@ -26,27 +26,28 @@
26struct btrfs_root; 26struct btrfs_root;
27struct btrfs_trans_handle; 27struct btrfs_trans_handle;
28 28
29int btrfs_test_free_space_cache(void); 29int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize);
30int btrfs_test_extent_buffer_operations(void); 30int btrfs_test_extent_buffer_operations(u32 sectorsize, u32 nodesize);
31int btrfs_test_extent_io(void); 31int btrfs_test_extent_io(u32 sectorsize, u32 nodesize);
32int btrfs_test_inodes(void); 32int btrfs_test_inodes(u32 sectorsize, u32 nodesize);
33int btrfs_test_qgroups(void); 33int btrfs_test_qgroups(u32 sectorsize, u32 nodesize);
34int btrfs_test_free_space_tree(void); 34int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize);
35int btrfs_init_test_fs(void); 35int btrfs_init_test_fs(void);
36void btrfs_destroy_test_fs(void); 36void btrfs_destroy_test_fs(void);
37struct inode *btrfs_new_test_inode(void); 37struct inode *btrfs_new_test_inode(void);
38struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(void); 38struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(void);
39void btrfs_free_dummy_root(struct btrfs_root *root); 39void btrfs_free_dummy_root(struct btrfs_root *root);
40struct btrfs_block_group_cache * 40struct btrfs_block_group_cache *
41btrfs_alloc_dummy_block_group(unsigned long length); 41btrfs_alloc_dummy_block_group(unsigned long length, u32 sectorsize);
42void btrfs_free_dummy_block_group(struct btrfs_block_group_cache *cache); 42void btrfs_free_dummy_block_group(struct btrfs_block_group_cache *cache);
43void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans); 43void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans);
44#else 44#else
45static inline int btrfs_test_free_space_cache(void) 45static inline int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize)
46{ 46{
47 return 0; 47 return 0;
48} 48}
49static inline int btrfs_test_extent_buffer_operations(void) 49static inline int btrfs_test_extent_buffer_operations(u32 sectorsize,
50 u32 nodesize)
50{ 51{
51 return 0; 52 return 0;
52} 53}
@@ -57,19 +58,19 @@ static inline int btrfs_init_test_fs(void)
57static inline void btrfs_destroy_test_fs(void) 58static inline void btrfs_destroy_test_fs(void)
58{ 59{
59} 60}
60static inline int btrfs_test_extent_io(void) 61static inline int btrfs_test_extent_io(u32 sectorsize, u32 nodesize)
61{ 62{
62 return 0; 63 return 0;
63} 64}
64static inline int btrfs_test_inodes(void) 65static inline int btrfs_test_inodes(u32 sectorsize, u32 nodesize)
65{ 66{
66 return 0; 67 return 0;
67} 68}
68static inline int btrfs_test_qgroups(void) 69static inline int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
69{ 70{
70 return 0; 71 return 0;
71} 72}
72static inline int btrfs_test_free_space_tree(void) 73static inline int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize)
73{ 74{
74 return 0; 75 return 0;
75} 76}
diff --git a/fs/btrfs/tests/extent-buffer-tests.c b/fs/btrfs/tests/extent-buffer-tests.c
index f51963a8f929..4f8cbd1ec5ee 100644
--- a/fs/btrfs/tests/extent-buffer-tests.c
+++ b/fs/btrfs/tests/extent-buffer-tests.c
@@ -22,7 +22,7 @@
22#include "../extent_io.h" 22#include "../extent_io.h"
23#include "../disk-io.h" 23#include "../disk-io.h"
24 24
25static int test_btrfs_split_item(void) 25static int test_btrfs_split_item(u32 sectorsize, u32 nodesize)
26{ 26{
27 struct btrfs_path *path; 27 struct btrfs_path *path;
28 struct btrfs_root *root; 28 struct btrfs_root *root;
@@ -40,7 +40,7 @@ static int test_btrfs_split_item(void)
40 40
41 test_msg("Running btrfs_split_item tests\n"); 41 test_msg("Running btrfs_split_item tests\n");
42 42
43 root = btrfs_alloc_dummy_root(); 43 root = btrfs_alloc_dummy_root(sectorsize, nodesize);
44 if (IS_ERR(root)) { 44 if (IS_ERR(root)) {
45 test_msg("Could not allocate root\n"); 45 test_msg("Could not allocate root\n");
46 return PTR_ERR(root); 46 return PTR_ERR(root);
@@ -53,7 +53,8 @@ static int test_btrfs_split_item(void)
53 return -ENOMEM; 53 return -ENOMEM;
54 } 54 }
55 55
56 path->nodes[0] = eb = alloc_dummy_extent_buffer(NULL, 4096); 56 path->nodes[0] = eb = alloc_dummy_extent_buffer(NULL, nodesize,
57 nodesize);
57 if (!eb) { 58 if (!eb) {
58 test_msg("Could not allocate dummy buffer\n"); 59 test_msg("Could not allocate dummy buffer\n");
59 ret = -ENOMEM; 60 ret = -ENOMEM;
@@ -222,8 +223,8 @@ out:
222 return ret; 223 return ret;
223} 224}
224 225
225int btrfs_test_extent_buffer_operations(void) 226int btrfs_test_extent_buffer_operations(u32 sectorsize, u32 nodesize)
226{ 227{
227 test_msg("Running extent buffer operation tests"); 228 test_msg("Running extent buffer operation tests\n");
228 return test_btrfs_split_item(); 229 return test_btrfs_split_item(sectorsize, nodesize);
229} 230}
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
index 55724607f79b..d19ab0317283 100644
--- a/fs/btrfs/tests/extent-io-tests.c
+++ b/fs/btrfs/tests/extent-io-tests.c
@@ -21,6 +21,7 @@
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/sizes.h> 22#include <linux/sizes.h>
23#include "btrfs-tests.h" 23#include "btrfs-tests.h"
24#include "../ctree.h"
24#include "../extent_io.h" 25#include "../extent_io.h"
25 26
26#define PROCESS_UNLOCK (1 << 0) 27#define PROCESS_UNLOCK (1 << 0)
@@ -65,7 +66,7 @@ static noinline int process_page_range(struct inode *inode, u64 start, u64 end,
65 return count; 66 return count;
66} 67}
67 68
68static int test_find_delalloc(void) 69static int test_find_delalloc(u32 sectorsize)
69{ 70{
70 struct inode *inode; 71 struct inode *inode;
71 struct extent_io_tree tmp; 72 struct extent_io_tree tmp;
@@ -113,7 +114,7 @@ static int test_find_delalloc(void)
113 * |--- delalloc ---| 114 * |--- delalloc ---|
114 * |--- search ---| 115 * |--- search ---|
115 */ 116 */
116 set_extent_delalloc(&tmp, 0, 4095, NULL); 117 set_extent_delalloc(&tmp, 0, sectorsize - 1, NULL);
117 start = 0; 118 start = 0;
118 end = 0; 119 end = 0;
119 found = find_lock_delalloc_range(inode, &tmp, locked_page, &start, 120 found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
@@ -122,9 +123,9 @@ static int test_find_delalloc(void)
122 test_msg("Should have found at least one delalloc\n"); 123 test_msg("Should have found at least one delalloc\n");
123 goto out_bits; 124 goto out_bits;
124 } 125 }
125 if (start != 0 || end != 4095) { 126 if (start != 0 || end != (sectorsize - 1)) {
126 test_msg("Expected start 0 end 4095, got start %Lu end %Lu\n", 127 test_msg("Expected start 0 end %u, got start %llu end %llu\n",
127 start, end); 128 sectorsize - 1, start, end);
128 goto out_bits; 129 goto out_bits;
129 } 130 }
130 unlock_extent(&tmp, start, end); 131 unlock_extent(&tmp, start, end);
@@ -144,7 +145,7 @@ static int test_find_delalloc(void)
144 test_msg("Couldn't find the locked page\n"); 145 test_msg("Couldn't find the locked page\n");
145 goto out_bits; 146 goto out_bits;
146 } 147 }
147 set_extent_delalloc(&tmp, 4096, max_bytes - 1, NULL); 148 set_extent_delalloc(&tmp, sectorsize, max_bytes - 1, NULL);
148 start = test_start; 149 start = test_start;
149 end = 0; 150 end = 0;
150 found = find_lock_delalloc_range(inode, &tmp, locked_page, &start, 151 found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
@@ -172,7 +173,7 @@ static int test_find_delalloc(void)
172 * |--- delalloc ---| 173 * |--- delalloc ---|
173 * |--- search ---| 174 * |--- search ---|
174 */ 175 */
175 test_start = max_bytes + 4096; 176 test_start = max_bytes + sectorsize;
176 locked_page = find_lock_page(inode->i_mapping, test_start >> 177 locked_page = find_lock_page(inode->i_mapping, test_start >>
177 PAGE_SHIFT); 178 PAGE_SHIFT);
178 if (!locked_page) { 179 if (!locked_page) {
@@ -272,6 +273,16 @@ out:
272 return ret; 273 return ret;
273} 274}
274 275
276/**
277 * test_bit_in_byte - Determine whether a bit is set in a byte
278 * @nr: bit number to test
279 * @addr: Address to start counting from
280 */
281static inline int test_bit_in_byte(int nr, const u8 *addr)
282{
283 return 1UL & (addr[nr / BITS_PER_BYTE] >> (nr & (BITS_PER_BYTE - 1)));
284}
285
275static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb, 286static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
276 unsigned long len) 287 unsigned long len)
277{ 288{
@@ -298,25 +309,29 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
298 return -EINVAL; 309 return -EINVAL;
299 } 310 }
300 311
301 bitmap_set(bitmap, (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE, 312 /* Straddling pages test */
302 sizeof(long) * BITS_PER_BYTE); 313 if (len > PAGE_SIZE) {
303 extent_buffer_bitmap_set(eb, PAGE_SIZE - sizeof(long) / 2, 0, 314 bitmap_set(bitmap,
304 sizeof(long) * BITS_PER_BYTE); 315 (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
305 if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) { 316 sizeof(long) * BITS_PER_BYTE);
306 test_msg("Setting straddling pages failed\n"); 317 extent_buffer_bitmap_set(eb, PAGE_SIZE - sizeof(long) / 2, 0,
307 return -EINVAL; 318 sizeof(long) * BITS_PER_BYTE);
308 } 319 if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
320 test_msg("Setting straddling pages failed\n");
321 return -EINVAL;
322 }
309 323
310 bitmap_set(bitmap, 0, len * BITS_PER_BYTE); 324 bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
311 bitmap_clear(bitmap, 325 bitmap_clear(bitmap,
312 (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE, 326 (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
313 sizeof(long) * BITS_PER_BYTE); 327 sizeof(long) * BITS_PER_BYTE);
314 extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE); 328 extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
315 extent_buffer_bitmap_clear(eb, PAGE_SIZE - sizeof(long) / 2, 0, 329 extent_buffer_bitmap_clear(eb, PAGE_SIZE - sizeof(long) / 2, 0,
316 sizeof(long) * BITS_PER_BYTE); 330 sizeof(long) * BITS_PER_BYTE);
317 if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) { 331 if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
318 test_msg("Clearing straddling pages failed\n"); 332 test_msg("Clearing straddling pages failed\n");
319 return -EINVAL; 333 return -EINVAL;
334 }
320 } 335 }
321 336
322 /* 337 /*
@@ -333,7 +348,7 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
333 for (i = 0; i < len * BITS_PER_BYTE; i++) { 348 for (i = 0; i < len * BITS_PER_BYTE; i++) {
334 int bit, bit1; 349 int bit, bit1;
335 350
336 bit = !!test_bit(i, bitmap); 351 bit = !!test_bit_in_byte(i, (u8 *)bitmap);
337 bit1 = !!extent_buffer_test_bit(eb, 0, i); 352 bit1 = !!extent_buffer_test_bit(eb, 0, i);
338 if (bit1 != bit) { 353 if (bit1 != bit) {
339 test_msg("Testing bit pattern failed\n"); 354 test_msg("Testing bit pattern failed\n");
@@ -351,15 +366,22 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
351 return 0; 366 return 0;
352} 367}
353 368
354static int test_eb_bitmaps(void) 369static int test_eb_bitmaps(u32 sectorsize, u32 nodesize)
355{ 370{
356 unsigned long len = PAGE_SIZE * 4; 371 unsigned long len;
357 unsigned long *bitmap; 372 unsigned long *bitmap;
358 struct extent_buffer *eb; 373 struct extent_buffer *eb;
359 int ret; 374 int ret;
360 375
361 test_msg("Running extent buffer bitmap tests\n"); 376 test_msg("Running extent buffer bitmap tests\n");
362 377
378 /*
379 * On ppc64, sectorsize can be 64K, so 4 * 64K would be larger than
380 * BTRFS_MAX_METADATA_BLOCKSIZE.
381 */
382 len = (sectorsize < BTRFS_MAX_METADATA_BLOCKSIZE)
383 ? sectorsize * 4 : sectorsize;
384
363 bitmap = kmalloc(len, GFP_KERNEL); 385 bitmap = kmalloc(len, GFP_KERNEL);
364 if (!bitmap) { 386 if (!bitmap) {
365 test_msg("Couldn't allocate test bitmap\n"); 387 test_msg("Couldn't allocate test bitmap\n");
@@ -379,7 +401,7 @@ static int test_eb_bitmaps(void)
379 401
380 /* Do it over again with an extent buffer which isn't page-aligned. */ 402 /* Do it over again with an extent buffer which isn't page-aligned. */
381 free_extent_buffer(eb); 403 free_extent_buffer(eb);
382 eb = __alloc_dummy_extent_buffer(NULL, PAGE_SIZE / 2, len); 404 eb = __alloc_dummy_extent_buffer(NULL, nodesize / 2, len);
383 if (!eb) { 405 if (!eb) {
384 test_msg("Couldn't allocate test extent buffer\n"); 406 test_msg("Couldn't allocate test extent buffer\n");
385 kfree(bitmap); 407 kfree(bitmap);
@@ -393,17 +415,17 @@ out:
393 return ret; 415 return ret;
394} 416}
395 417
396int btrfs_test_extent_io(void) 418int btrfs_test_extent_io(u32 sectorsize, u32 nodesize)
397{ 419{
398 int ret; 420 int ret;
399 421
400 test_msg("Running extent I/O tests\n"); 422 test_msg("Running extent I/O tests\n");
401 423
402 ret = test_find_delalloc(); 424 ret = test_find_delalloc(sectorsize);
403 if (ret) 425 if (ret)
404 goto out; 426 goto out;
405 427
406 ret = test_eb_bitmaps(); 428 ret = test_eb_bitmaps(sectorsize, nodesize);
407out: 429out:
408 test_msg("Extent I/O tests finished\n"); 430 test_msg("Extent I/O tests finished\n");
409 return ret; 431 return ret;
diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
index 0eeb8f3d6b67..3956bb2ff84c 100644
--- a/fs/btrfs/tests/free-space-tests.c
+++ b/fs/btrfs/tests/free-space-tests.c
@@ -22,7 +22,7 @@
22#include "../disk-io.h" 22#include "../disk-io.h"
23#include "../free-space-cache.h" 23#include "../free-space-cache.h"
24 24
25#define BITS_PER_BITMAP (PAGE_SIZE * 8) 25#define BITS_PER_BITMAP (PAGE_SIZE * 8UL)
26 26
27/* 27/*
28 * This test just does basic sanity checking, making sure we can add an extent 28 * This test just does basic sanity checking, making sure we can add an extent
@@ -99,7 +99,8 @@ static int test_extents(struct btrfs_block_group_cache *cache)
99 return 0; 99 return 0;
100} 100}
101 101
102static int test_bitmaps(struct btrfs_block_group_cache *cache) 102static int test_bitmaps(struct btrfs_block_group_cache *cache,
103 u32 sectorsize)
103{ 104{
104 u64 next_bitmap_offset; 105 u64 next_bitmap_offset;
105 int ret; 106 int ret;
@@ -139,7 +140,7 @@ static int test_bitmaps(struct btrfs_block_group_cache *cache)
139 * The first bitmap we have starts at offset 0 so the next one is just 140 * The first bitmap we have starts at offset 0 so the next one is just
140 * at the end of the first bitmap. 141 * at the end of the first bitmap.
141 */ 142 */
142 next_bitmap_offset = (u64)(BITS_PER_BITMAP * 4096); 143 next_bitmap_offset = (u64)(BITS_PER_BITMAP * sectorsize);
143 144
144 /* Test a bit straddling two bitmaps */ 145 /* Test a bit straddling two bitmaps */
145 ret = test_add_free_space_entry(cache, next_bitmap_offset - SZ_2M, 146 ret = test_add_free_space_entry(cache, next_bitmap_offset - SZ_2M,
@@ -167,9 +168,10 @@ static int test_bitmaps(struct btrfs_block_group_cache *cache)
167} 168}
168 169
169/* This is the high grade jackassery */ 170/* This is the high grade jackassery */
170static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache) 171static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache,
172 u32 sectorsize)
171{ 173{
172 u64 bitmap_offset = (u64)(BITS_PER_BITMAP * 4096); 174 u64 bitmap_offset = (u64)(BITS_PER_BITMAP * sectorsize);
173 int ret; 175 int ret;
174 176
175 test_msg("Running bitmap and extent tests\n"); 177 test_msg("Running bitmap and extent tests\n");
@@ -401,7 +403,8 @@ static int check_cache_empty(struct btrfs_block_group_cache *cache)
401 * requests. 403 * requests.
402 */ 404 */
403static int 405static int
404test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache) 406test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache,
407 u32 sectorsize)
405{ 408{
406 int ret; 409 int ret;
407 u64 offset; 410 u64 offset;
@@ -539,7 +542,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
539 * The goal is to test that the bitmap entry space stealing doesn't 542 * The goal is to test that the bitmap entry space stealing doesn't
540 * steal this space region. 543 * steal this space region.
541 */ 544 */
542 ret = btrfs_add_free_space(cache, SZ_128M + SZ_16M, 4096); 545 ret = btrfs_add_free_space(cache, SZ_128M + SZ_16M, sectorsize);
543 if (ret) { 546 if (ret) {
544 test_msg("Error adding free space: %d\n", ret); 547 test_msg("Error adding free space: %d\n", ret);
545 return ret; 548 return ret;
@@ -597,8 +600,8 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
597 return -ENOENT; 600 return -ENOENT;
598 } 601 }
599 602
600 if (cache->free_space_ctl->free_space != (SZ_1M + 4096)) { 603 if (cache->free_space_ctl->free_space != (SZ_1M + sectorsize)) {
601 test_msg("Cache free space is not 1Mb + 4Kb\n"); 604 test_msg("Cache free space is not 1Mb + %u\n", sectorsize);
602 return -EINVAL; 605 return -EINVAL;
603 } 606 }
604 607
@@ -611,22 +614,25 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
611 return -EINVAL; 614 return -EINVAL;
612 } 615 }
613 616
614 /* All that remains is a 4Kb free space region in a bitmap. Confirm. */ 617 /*
618 * All that remains is a sectorsize free space region in a bitmap.
619 * Confirm.
620 */
615 ret = check_num_extents_and_bitmaps(cache, 1, 1); 621 ret = check_num_extents_and_bitmaps(cache, 1, 1);
616 if (ret) 622 if (ret)
617 return ret; 623 return ret;
618 624
619 if (cache->free_space_ctl->free_space != 4096) { 625 if (cache->free_space_ctl->free_space != sectorsize) {
620 test_msg("Cache free space is not 4Kb\n"); 626 test_msg("Cache free space is not %u\n", sectorsize);
621 return -EINVAL; 627 return -EINVAL;
622 } 628 }
623 629
624 offset = btrfs_find_space_for_alloc(cache, 630 offset = btrfs_find_space_for_alloc(cache,
625 0, 4096, 0, 631 0, sectorsize, 0,
626 &max_extent_size); 632 &max_extent_size);
627 if (offset != (SZ_128M + SZ_16M)) { 633 if (offset != (SZ_128M + SZ_16M)) {
628 test_msg("Failed to allocate 4Kb from space cache, returned offset is: %llu\n", 634 test_msg("Failed to allocate %u, returned offset : %llu\n",
629 offset); 635 sectorsize, offset);
630 return -EINVAL; 636 return -EINVAL;
631 } 637 }
632 638
@@ -733,7 +739,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
733 * The goal is to test that the bitmap entry space stealing doesn't 739 * The goal is to test that the bitmap entry space stealing doesn't
734 * steal this space region. 740 * steal this space region.
735 */ 741 */
736 ret = btrfs_add_free_space(cache, SZ_32M, 8192); 742 ret = btrfs_add_free_space(cache, SZ_32M, 2 * sectorsize);
737 if (ret) { 743 if (ret) {
738 test_msg("Error adding free space: %d\n", ret); 744 test_msg("Error adding free space: %d\n", ret);
739 return ret; 745 return ret;
@@ -757,7 +763,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
757 763
758 /* 764 /*
759 * Confirm that our extent entry didn't steal all free space from the 765 * Confirm that our extent entry didn't steal all free space from the
760 * bitmap, because of the small 8Kb free space region. 766 * bitmap, because of the small 2 * sectorsize free space region.
761 */ 767 */
762 ret = check_num_extents_and_bitmaps(cache, 2, 1); 768 ret = check_num_extents_and_bitmaps(cache, 2, 1);
763 if (ret) 769 if (ret)
@@ -783,8 +789,8 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
783 return -ENOENT; 789 return -ENOENT;
784 } 790 }
785 791
786 if (cache->free_space_ctl->free_space != (SZ_1M + 8192)) { 792 if (cache->free_space_ctl->free_space != (SZ_1M + 2 * sectorsize)) {
787 test_msg("Cache free space is not 1Mb + 8Kb\n"); 793 test_msg("Cache free space is not 1Mb + %u\n", 2 * sectorsize);
788 return -EINVAL; 794 return -EINVAL;
789 } 795 }
790 796
@@ -796,21 +802,25 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
796 return -EINVAL; 802 return -EINVAL;
797 } 803 }
798 804
799 /* All that remains is a 8Kb free space region in a bitmap. Confirm. */ 805 /*
806 * All that remains is a 2 * sectorsize free space region
807 * in a bitmap. Confirm.
808 */
800 ret = check_num_extents_and_bitmaps(cache, 1, 1); 809 ret = check_num_extents_and_bitmaps(cache, 1, 1);
801 if (ret) 810 if (ret)
802 return ret; 811 return ret;
803 812
804 if (cache->free_space_ctl->free_space != 8192) { 813 if (cache->free_space_ctl->free_space != 2 * sectorsize) {
805 test_msg("Cache free space is not 8Kb\n"); 814 test_msg("Cache free space is not %u\n", 2 * sectorsize);
806 return -EINVAL; 815 return -EINVAL;
807 } 816 }
808 817
809 offset = btrfs_find_space_for_alloc(cache, 818 offset = btrfs_find_space_for_alloc(cache,
810 0, 8192, 0, 819 0, 2 * sectorsize, 0,
811 &max_extent_size); 820 &max_extent_size);
812 if (offset != SZ_32M) { 821 if (offset != SZ_32M) {
813 test_msg("Failed to allocate 8Kb from space cache, returned offset is: %llu\n", 822 test_msg("Failed to allocate %u, offset: %llu\n",
823 2 * sectorsize,
814 offset); 824 offset);
815 return -EINVAL; 825 return -EINVAL;
816 } 826 }
@@ -825,7 +835,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
825 return 0; 835 return 0;
826} 836}
827 837
828int btrfs_test_free_space_cache(void) 838int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize)
829{ 839{
830 struct btrfs_block_group_cache *cache; 840 struct btrfs_block_group_cache *cache;
831 struct btrfs_root *root = NULL; 841 struct btrfs_root *root = NULL;
@@ -833,13 +843,19 @@ int btrfs_test_free_space_cache(void)
833 843
834 test_msg("Running btrfs free space cache tests\n"); 844 test_msg("Running btrfs free space cache tests\n");
835 845
836 cache = btrfs_alloc_dummy_block_group(1024 * 1024 * 1024); 846 /*
847 * For ppc64 (with 64k page size), bytes per bitmap might be
848 * larger than 1G. To make the bitmap test work on ppc64,
849 * allocate a dummy block group large enough to span bitmaps.
850 */
851 cache = btrfs_alloc_dummy_block_group(BITS_PER_BITMAP * sectorsize
852 + PAGE_SIZE, sectorsize);
837 if (!cache) { 853 if (!cache) {
838 test_msg("Couldn't run the tests\n"); 854 test_msg("Couldn't run the tests\n");
839 return 0; 855 return 0;
840 } 856 }
841 857
842 root = btrfs_alloc_dummy_root(); 858 root = btrfs_alloc_dummy_root(sectorsize, nodesize);
843 if (IS_ERR(root)) { 859 if (IS_ERR(root)) {
844 ret = PTR_ERR(root); 860 ret = PTR_ERR(root);
845 goto out; 861 goto out;
@@ -855,14 +871,14 @@ int btrfs_test_free_space_cache(void)
855 ret = test_extents(cache); 871 ret = test_extents(cache);
856 if (ret) 872 if (ret)
857 goto out; 873 goto out;
858 ret = test_bitmaps(cache); 874 ret = test_bitmaps(cache, sectorsize);
859 if (ret) 875 if (ret)
860 goto out; 876 goto out;
861 ret = test_bitmaps_and_extents(cache); 877 ret = test_bitmaps_and_extents(cache, sectorsize);
862 if (ret) 878 if (ret)
863 goto out; 879 goto out;
864 880
865 ret = test_steal_space_from_bitmap_to_extent(cache); 881 ret = test_steal_space_from_bitmap_to_extent(cache, sectorsize);
866out: 882out:
867 btrfs_free_dummy_block_group(cache); 883 btrfs_free_dummy_block_group(cache);
868 btrfs_free_dummy_root(root); 884 btrfs_free_dummy_root(root);
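
The free-space-cache changes above reduce to one sizing rule: a bitmap holds PAGE_SIZE * 8 bits and each bit tracks one sector, so the bytes covered by a single bitmap scale with both page size and sector size. A minimal standalone sketch of that arithmetic (illustrative values, not kernel code):

#include <stdio.h>

/* Sketch of the sizing rule behind BITS_PER_BITMAP above:
 * one bitmap has page_size * 8 bits, one bit per sector. */
static unsigned long long bitmap_bytes(unsigned long long page_size,
                                       unsigned long long sectorsize)
{
        return page_size * 8ULL * sectorsize;
}

int main(void)
{
        /* 4K pages, 4K sectors: one bitmap covers 128 MiB. */
        printf("4K pages:  %llu MiB per bitmap\n",
               bitmap_bytes(4096, 4096) >> 20);
        /* 64K pages (ppc64), 4K sectors: one bitmap covers 2 GiB, which
         * is why the old fixed 1 GiB dummy block group was too small. */
        printf("64K pages: %llu MiB per bitmap\n",
               bitmap_bytes(65536, 4096) >> 20);
        return 0;
}
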
diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c
index 7cea4462acd5..aac507085ab0 100644
--- a/fs/btrfs/tests/free-space-tree-tests.c
+++ b/fs/btrfs/tests/free-space-tree-tests.c
@@ -16,6 +16,7 @@
16 * Boston, MA 021110-1307, USA. 16 * Boston, MA 021110-1307, USA.
17 */ 17 */
18 18
19#include <linux/types.h>
19#include "btrfs-tests.h" 20#include "btrfs-tests.h"
20#include "../ctree.h" 21#include "../ctree.h"
21#include "../disk-io.h" 22#include "../disk-io.h"
@@ -30,7 +31,7 @@ struct free_space_extent {
30 * The test cases align their operations to this in order to hit some of the 31 * The test cases align their operations to this in order to hit some of the
31 * edge cases in the bitmap code. 32 * edge cases in the bitmap code.
32 */ 33 */
33#define BITMAP_RANGE (BTRFS_FREE_SPACE_BITMAP_BITS * 4096) 34#define BITMAP_RANGE (BTRFS_FREE_SPACE_BITMAP_BITS * PAGE_SIZE)
34 35
35static int __check_free_space_extents(struct btrfs_trans_handle *trans, 36static int __check_free_space_extents(struct btrfs_trans_handle *trans,
36 struct btrfs_fs_info *fs_info, 37 struct btrfs_fs_info *fs_info,
@@ -439,7 +440,8 @@ typedef int (*test_func_t)(struct btrfs_trans_handle *,
439 struct btrfs_block_group_cache *, 440 struct btrfs_block_group_cache *,
440 struct btrfs_path *); 441 struct btrfs_path *);
441 442
442static int run_test(test_func_t test_func, int bitmaps) 443static int run_test(test_func_t test_func, int bitmaps,
444 u32 sectorsize, u32 nodesize)
443{ 445{
444 struct btrfs_root *root = NULL; 446 struct btrfs_root *root = NULL;
445 struct btrfs_block_group_cache *cache = NULL; 447 struct btrfs_block_group_cache *cache = NULL;
@@ -447,7 +449,7 @@ static int run_test(test_func_t test_func, int bitmaps)
447 struct btrfs_path *path = NULL; 449 struct btrfs_path *path = NULL;
448 int ret; 450 int ret;
449 451
450 root = btrfs_alloc_dummy_root(); 452 root = btrfs_alloc_dummy_root(sectorsize, nodesize);
451 if (IS_ERR(root)) { 453 if (IS_ERR(root)) {
452 test_msg("Couldn't allocate dummy root\n"); 454 test_msg("Couldn't allocate dummy root\n");
453 ret = PTR_ERR(root); 455 ret = PTR_ERR(root);
@@ -466,7 +468,8 @@ static int run_test(test_func_t test_func, int bitmaps)
466 root->fs_info->free_space_root = root; 468 root->fs_info->free_space_root = root;
467 root->fs_info->tree_root = root; 469 root->fs_info->tree_root = root;
468 470
469 root->node = alloc_test_extent_buffer(root->fs_info, 4096); 471 root->node = alloc_test_extent_buffer(root->fs_info,
472 nodesize, nodesize);
470 if (!root->node) { 473 if (!root->node) {
471 test_msg("Couldn't allocate dummy buffer\n"); 474 test_msg("Couldn't allocate dummy buffer\n");
472 ret = -ENOMEM; 475 ret = -ENOMEM;
@@ -474,9 +477,9 @@ static int run_test(test_func_t test_func, int bitmaps)
474 } 477 }
475 btrfs_set_header_level(root->node, 0); 478 btrfs_set_header_level(root->node, 0);
476 btrfs_set_header_nritems(root->node, 0); 479 btrfs_set_header_nritems(root->node, 0);
477 root->alloc_bytenr += 8192; 480 root->alloc_bytenr += 2 * nodesize;
478 481
479 cache = btrfs_alloc_dummy_block_group(8 * BITMAP_RANGE); 482 cache = btrfs_alloc_dummy_block_group(8 * BITMAP_RANGE, sectorsize);
480 if (!cache) { 483 if (!cache) {
481 test_msg("Couldn't allocate dummy block group cache\n"); 484 test_msg("Couldn't allocate dummy block group cache\n");
482 ret = -ENOMEM; 485 ret = -ENOMEM;
@@ -534,17 +537,18 @@ out:
534 return ret; 537 return ret;
535} 538}
536 539
537static int run_test_both_formats(test_func_t test_func) 540static int run_test_both_formats(test_func_t test_func,
541 u32 sectorsize, u32 nodesize)
538{ 542{
539 int ret; 543 int ret;
540 544
541 ret = run_test(test_func, 0); 545 ret = run_test(test_func, 0, sectorsize, nodesize);
542 if (ret) 546 if (ret)
543 return ret; 547 return ret;
544 return run_test(test_func, 1); 548 return run_test(test_func, 1, sectorsize, nodesize);
545} 549}
546 550
547int btrfs_test_free_space_tree(void) 551int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize)
548{ 552{
549 test_func_t tests[] = { 553 test_func_t tests[] = {
550 test_empty_block_group, 554 test_empty_block_group,
@@ -561,9 +565,11 @@ int btrfs_test_free_space_tree(void)
561 565
562 test_msg("Running free space tree tests\n"); 566 test_msg("Running free space tree tests\n");
563 for (i = 0; i < ARRAY_SIZE(tests); i++) { 567 for (i = 0; i < ARRAY_SIZE(tests); i++) {
564 int ret = run_test_both_formats(tests[i]); 568 int ret = run_test_both_formats(tests[i], sectorsize,
569 nodesize);
565 if (ret) { 570 if (ret) {
566 test_msg("%pf failed\n", tests[i]); 571 test_msg("%pf : sectorsize %u failed\n",
572 tests[i], sectorsize);
567 return ret; 573 return ret;
568 } 574 }
569 } 575 }
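
The free-space-tree tests follow the same conversion: sectorsize and nodesize are threaded through one runner, and run_test_both_formats() executes each case once per free-space format. A hedged sketch of that dispatch shape, with the kernel-internal argument list collapsed into a plain function pointer (that simplification is an assumption, not the real test_func_t signature):

#include <stdio.h>

typedef unsigned int u32;
/* Simplified stand-in for the kernel's test_func_t (assumption). */
typedef int (*test_fn)(int bitmaps, u32 sectorsize, u32 nodesize);

static int demo_case(int bitmaps, u32 sectorsize, u32 nodesize)
{
        printf("bitmaps=%d sectorsize=%u nodesize=%u\n",
               bitmaps, sectorsize, nodesize);
        return 0;
}

/* Mirrors run_test_both_formats(): run once per free-space format. */
static int run_both_formats(test_fn fn, u32 sectorsize, u32 nodesize)
{
        int ret = fn(0, sectorsize, nodesize);

        if (ret)
                return ret;
        return fn(1, sectorsize, nodesize);
}

int main(void)
{
        /* 64K sectors with a 64K nodesize, as a ppc64-style example. */
        return run_both_formats(demo_case, 65536, 65536);
}
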
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
index 8a25fe8b7c45..29648c0a39f1 100644
--- a/fs/btrfs/tests/inode-tests.c
+++ b/fs/btrfs/tests/inode-tests.c
@@ -16,6 +16,7 @@
16 * Boston, MA 021110-1307, USA. 16 * Boston, MA 021110-1307, USA.
17 */ 17 */
18 18
19#include <linux/types.h>
19#include "btrfs-tests.h" 20#include "btrfs-tests.h"
20#include "../ctree.h" 21#include "../ctree.h"
21#include "../btrfs_inode.h" 22#include "../btrfs_inode.h"
@@ -86,19 +87,19 @@ static void insert_inode_item_key(struct btrfs_root *root)
86 * diagram of how the extents will look, though this may not be possible; we still 87 * diagram of how the extents will look, though this may not be possible; we still
87 * want to make sure everything acts normally (the last number is not inclusive) 88 * want to make sure everything acts normally (the last number is not inclusive)
88 * 89 *
89 * [0 - 5][5 - 6][6 - 10][10 - 4096][ 4096 - 8192 ][8192 - 12288] 90 * [0 - 5][5 - 6][ 6 - 4096 ][ 4096 - 4100][4100 - 8195][8195 - 12291]
90 * [hole ][inline][ hole ][ regular ][regular1 split][ hole ] 91 * [hole ][inline][hole but no extent][ hole ][ regular ][regular1 split]
91 * 92 *
92 * [ 12288 - 20480][20480 - 24576][ 24576 - 28672 ][28672 - 36864][36864 - 45056] 93 * [12291 - 16387][16387 - 24579][24579 - 28675][ 28675 - 32771][32771 - 36867 ]
93 * [regular1 split][ prealloc1 ][prealloc1 written][ prealloc1 ][ compressed ] 94 * [ hole ][regular1 split][ prealloc ][ prealloc1 ][prealloc1 written]
94 * 95 *
95 * [45056 - 49152][49152-53248][53248-61440][61440-65536][ 65536+81920 ] 96 * [36867 - 45059][45059 - 53251][53251 - 57347][57347 - 61443][61443- 69635]
96 * [ compressed1 ][ regular ][compressed1][ regular ][ hole but no extent] 97 * [ prealloc1 ][ compressed ][ compressed1 ][ regular ][ compressed1]
97 * 98 *
98 * [81920-86016] 99 * [69635-73731][ 73731 - 86019 ][86019-90115]
99 * [ regular ] 100 * [ regular ][ hole but no extent][ regular ]
100 */ 101 */
101static void setup_file_extents(struct btrfs_root *root) 102static void setup_file_extents(struct btrfs_root *root, u32 sectorsize)
102{ 103{
103 int slot = 0; 104 int slot = 0;
104 u64 disk_bytenr = SZ_1M; 105 u64 disk_bytenr = SZ_1M;
@@ -119,7 +120,7 @@ static void setup_file_extents(struct btrfs_root *root)
119 insert_extent(root, offset, 1, 1, 0, 0, 0, BTRFS_FILE_EXTENT_INLINE, 0, 120 insert_extent(root, offset, 1, 1, 0, 0, 0, BTRFS_FILE_EXTENT_INLINE, 0,
120 slot); 121 slot);
121 slot++; 122 slot++;
122 offset = 4096; 123 offset = sectorsize;
123 124
124 /* Now another hole */ 125 /* Now another hole */
125 insert_extent(root, offset, 4, 4, 0, 0, 0, BTRFS_FILE_EXTENT_REG, 0, 126 insert_extent(root, offset, 4, 4, 0, 0, 0, BTRFS_FILE_EXTENT_REG, 0,
@@ -128,99 +129,106 @@ static void setup_file_extents(struct btrfs_root *root)
128 offset += 4; 129 offset += 4;
129 130
130 /* Now for a regular extent */ 131 /* Now for a regular extent */
131 insert_extent(root, offset, 4095, 4095, 0, disk_bytenr, 4096, 132 insert_extent(root, offset, sectorsize - 1, sectorsize - 1, 0,
132 BTRFS_FILE_EXTENT_REG, 0, slot); 133 disk_bytenr, sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot);
133 slot++; 134 slot++;
134 disk_bytenr += 4096; 135 disk_bytenr += sectorsize;
135 offset += 4095; 136 offset += sectorsize - 1;
136 137
137 /* 138 /*
138 * Now for 3 extents that were split from a hole punch so we test 139 * Now for 3 extents that were split from a hole punch so we test
139 * offsets properly. 140 * offsets properly.
140 */ 141 */
141 insert_extent(root, offset, 4096, 16384, 0, disk_bytenr, 16384, 142 insert_extent(root, offset, sectorsize, 4 * sectorsize, 0, disk_bytenr,
142 BTRFS_FILE_EXTENT_REG, 0, slot); 143 4 * sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot);
143 slot++; 144 slot++;
144 offset += 4096; 145 offset += sectorsize;
145 insert_extent(root, offset, 4096, 4096, 0, 0, 0, BTRFS_FILE_EXTENT_REG, 146 insert_extent(root, offset, sectorsize, sectorsize, 0, 0, 0,
146 0, slot); 147 BTRFS_FILE_EXTENT_REG, 0, slot);
147 slot++; 148 slot++;
148 offset += 4096; 149 offset += sectorsize;
149 insert_extent(root, offset, 8192, 16384, 8192, disk_bytenr, 16384, 150 insert_extent(root, offset, 2 * sectorsize, 4 * sectorsize,
151 2 * sectorsize, disk_bytenr, 4 * sectorsize,
150 BTRFS_FILE_EXTENT_REG, 0, slot); 152 BTRFS_FILE_EXTENT_REG, 0, slot);
151 slot++; 153 slot++;
152 offset += 8192; 154 offset += 2 * sectorsize;
153 disk_bytenr += 16384; 155 disk_bytenr += 4 * sectorsize;
154 156
155 /* Now for an unwritten prealloc extent */ 157 /* Now for an unwritten prealloc extent */
156 insert_extent(root, offset, 4096, 4096, 0, disk_bytenr, 4096, 158 insert_extent(root, offset, sectorsize, sectorsize, 0, disk_bytenr,
157 BTRFS_FILE_EXTENT_PREALLOC, 0, slot); 159 sectorsize, BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
158 slot++; 160 slot++;
159 offset += 4096; 161 offset += sectorsize;
160 162
161 /* 163 /*
162 * We want to jack up disk_bytenr a little more so the em stuff doesn't 164 * We want to jack up disk_bytenr a little more so the em stuff doesn't
163 * merge our records. 165 * merge our records.
164 */ 166 */
165 disk_bytenr += 8192; 167 disk_bytenr += 2 * sectorsize;
166 168
167 /* 169 /*
168 * Now for a partially written prealloc extent, basically the same as 170 * Now for a partially written prealloc extent, basically the same as
169 * the hole punch example above. Ram_bytes never changes when you mark 171 * the hole punch example above. Ram_bytes never changes when you mark
170 * extents written btw. 172 * extents written btw.
171 */ 173 */
172 insert_extent(root, offset, 4096, 16384, 0, disk_bytenr, 16384, 174 insert_extent(root, offset, sectorsize, 4 * sectorsize, 0, disk_bytenr,
173 BTRFS_FILE_EXTENT_PREALLOC, 0, slot); 175 4 * sectorsize, BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
174 slot++; 176 slot++;
175 offset += 4096; 177 offset += sectorsize;
176 insert_extent(root, offset, 4096, 16384, 4096, disk_bytenr, 16384, 178 insert_extent(root, offset, sectorsize, 4 * sectorsize, sectorsize,
177 BTRFS_FILE_EXTENT_REG, 0, slot); 179 disk_bytenr, 4 * sectorsize, BTRFS_FILE_EXTENT_REG, 0,
180 slot);
178 slot++; 181 slot++;
179 offset += 4096; 182 offset += sectorsize;
180 insert_extent(root, offset, 8192, 16384, 8192, disk_bytenr, 16384, 183 insert_extent(root, offset, 2 * sectorsize, 4 * sectorsize,
184 2 * sectorsize, disk_bytenr, 4 * sectorsize,
181 BTRFS_FILE_EXTENT_PREALLOC, 0, slot); 185 BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
182 slot++; 186 slot++;
183 offset += 8192; 187 offset += 2 * sectorsize;
184 disk_bytenr += 16384; 188 disk_bytenr += 4 * sectorsize;
185 189
186 /* Now a normal compressed extent */ 190 /* Now a normal compressed extent */
187 insert_extent(root, offset, 8192, 8192, 0, disk_bytenr, 4096, 191 insert_extent(root, offset, 2 * sectorsize, 2 * sectorsize, 0,
188 BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot); 192 disk_bytenr, sectorsize, BTRFS_FILE_EXTENT_REG,
193 BTRFS_COMPRESS_ZLIB, slot);
189 slot++; 194 slot++;
190 offset += 8192; 195 offset += 2 * sectorsize;
191 /* No merges */ 196 /* No merges */
192 disk_bytenr += 8192; 197 disk_bytenr += 2 * sectorsize;
193 198
194 /* Now a split compressed extent */ 199 /* Now a split compressed extent */
195 insert_extent(root, offset, 4096, 16384, 0, disk_bytenr, 4096, 200 insert_extent(root, offset, sectorsize, 4 * sectorsize, 0, disk_bytenr,
196 BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot); 201 sectorsize, BTRFS_FILE_EXTENT_REG,
202 BTRFS_COMPRESS_ZLIB, slot);
197 slot++; 203 slot++;
198 offset += 4096; 204 offset += sectorsize;
199 insert_extent(root, offset, 4096, 4096, 0, disk_bytenr + 4096, 4096, 205 insert_extent(root, offset, sectorsize, sectorsize, 0,
206 disk_bytenr + sectorsize, sectorsize,
200 BTRFS_FILE_EXTENT_REG, 0, slot); 207 BTRFS_FILE_EXTENT_REG, 0, slot);
201 slot++; 208 slot++;
202 offset += 4096; 209 offset += sectorsize;
203 insert_extent(root, offset, 8192, 16384, 8192, disk_bytenr, 4096, 210 insert_extent(root, offset, 2 * sectorsize, 4 * sectorsize,
211 2 * sectorsize, disk_bytenr, sectorsize,
204 BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot); 212 BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot);
205 slot++; 213 slot++;
206 offset += 8192; 214 offset += 2 * sectorsize;
207 disk_bytenr += 8192; 215 disk_bytenr += 2 * sectorsize;
208 216
209 /* Now extents that have a hole but no hole extent */ 217 /* Now extents that have a hole but no hole extent */
210 insert_extent(root, offset, 4096, 4096, 0, disk_bytenr, 4096, 218 insert_extent(root, offset, sectorsize, sectorsize, 0, disk_bytenr,
211 BTRFS_FILE_EXTENT_REG, 0, slot); 219 sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot);
212 slot++; 220 slot++;
213 offset += 16384; 221 offset += 4 * sectorsize;
214 disk_bytenr += 4096; 222 disk_bytenr += sectorsize;
215 insert_extent(root, offset, 4096, 4096, 0, disk_bytenr, 4096, 223 insert_extent(root, offset, sectorsize, sectorsize, 0, disk_bytenr,
216 BTRFS_FILE_EXTENT_REG, 0, slot); 224 sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot);
217} 225}
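
With the layout expressed in sector units, the boundary numbers in the updated comment can be rederived for a 4K sector. A standalone sketch that simply replays the first offset steps of setup_file_extents():

#include <stdio.h>

/* Replays the first offset steps of setup_file_extents() for
 * sectorsize = 4096 to rederive the comment's boundaries. */
int main(void)
{
        unsigned long long sectorsize = 4096, offset = 0;

        offset += 5;                    /* [0 - 5]        hole           */
        offset += 1;                    /* [5 - 6]        inline         */
        offset = sectorsize;            /* [6 - 4096]     hole, no item  */
        offset += 4;                    /* [4096 - 4100]  hole           */
        offset += sectorsize - 1;       /* [4100 - 8195]  regular        */
        offset += sectorsize;           /* [8195 - 12291] regular1 split */
        printf("next boundary: %llu\n", offset);        /* 12291 */
        return 0;
}
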
218 226
219static unsigned long prealloc_only = 0; 227static unsigned long prealloc_only = 0;
220static unsigned long compressed_only = 0; 228static unsigned long compressed_only = 0;
221static unsigned long vacancy_only = 0; 229static unsigned long vacancy_only = 0;
222 230
223static noinline int test_btrfs_get_extent(void) 231static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
224{ 232{
225 struct inode *inode = NULL; 233 struct inode *inode = NULL;
226 struct btrfs_root *root = NULL; 234 struct btrfs_root *root = NULL;
@@ -240,7 +248,7 @@ static noinline int test_btrfs_get_extent(void)
240 BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID; 248 BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
241 BTRFS_I(inode)->location.offset = 0; 249 BTRFS_I(inode)->location.offset = 0;
242 250
243 root = btrfs_alloc_dummy_root(); 251 root = btrfs_alloc_dummy_root(sectorsize, nodesize);
244 if (IS_ERR(root)) { 252 if (IS_ERR(root)) {
245 test_msg("Couldn't allocate root\n"); 253 test_msg("Couldn't allocate root\n");
246 goto out; 254 goto out;
@@ -256,7 +264,7 @@ static noinline int test_btrfs_get_extent(void)
256 goto out; 264 goto out;
257 } 265 }
258 266
259 root->node = alloc_dummy_extent_buffer(NULL, 4096); 267 root->node = alloc_dummy_extent_buffer(NULL, nodesize, nodesize);
260 if (!root->node) { 268 if (!root->node) {
261 test_msg("Couldn't allocate dummy buffer\n"); 269 test_msg("Couldn't allocate dummy buffer\n");
262 goto out; 270 goto out;
@@ -273,7 +281,7 @@ static noinline int test_btrfs_get_extent(void)
273 281
274 /* First with no extents */ 282 /* First with no extents */
275 BTRFS_I(inode)->root = root; 283 BTRFS_I(inode)->root = root;
276 em = btrfs_get_extent(inode, NULL, 0, 0, 4096, 0); 284 em = btrfs_get_extent(inode, NULL, 0, 0, sectorsize, 0);
277 if (IS_ERR(em)) { 285 if (IS_ERR(em)) {
278 em = NULL; 286 em = NULL;
279 test_msg("Got an error when we shouldn't have\n"); 287 test_msg("Got an error when we shouldn't have\n");
@@ -295,7 +303,7 @@ static noinline int test_btrfs_get_extent(void)
295 * setup_file_extents, so if you change anything there you need to 303 * setup_file_extents, so if you change anything there you need to
296 * update the comment and update the expected values below. 304 * update the comment and update the expected values below.
297 */ 305 */
298 setup_file_extents(root); 306 setup_file_extents(root, sectorsize);
299 307
300 em = btrfs_get_extent(inode, NULL, 0, 0, (u64)-1, 0); 308 em = btrfs_get_extent(inode, NULL, 0, 0, (u64)-1, 0);
301 if (IS_ERR(em)) { 309 if (IS_ERR(em)) {
@@ -318,7 +326,7 @@ static noinline int test_btrfs_get_extent(void)
318 offset = em->start + em->len; 326 offset = em->start + em->len;
319 free_extent_map(em); 327 free_extent_map(em);
320 328
321 em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); 329 em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
322 if (IS_ERR(em)) { 330 if (IS_ERR(em)) {
323 test_msg("Got an error when we shouldn't have\n"); 331 test_msg("Got an error when we shouldn't have\n");
324 goto out; 332 goto out;
@@ -327,7 +335,8 @@ static noinline int test_btrfs_get_extent(void)
327 test_msg("Expected an inline, got %llu\n", em->block_start); 335 test_msg("Expected an inline, got %llu\n", em->block_start);
328 goto out; 336 goto out;
329 } 337 }
330 if (em->start != offset || em->len != 4091) { 338
339 if (em->start != offset || em->len != (sectorsize - 5)) {
331 test_msg("Unexpected extent wanted start %llu len 1, got start " 340 test_msg("Unexpected extent wanted start %llu len 1, got start "
332 "%llu len %llu\n", offset, em->start, em->len); 341 "%llu len %llu\n", offset, em->start, em->len);
333 goto out; 342 goto out;
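
From here on, test_btrfs_get_extent() repeats one walk step: fetch the extent map at the current offset, check its start, length and flags against the layout comment, then advance by the mapped length. Condensed from the calls below into a shape sketch (kernel context, so not standalone; more_extents_expected is a placeholder):

/* Shape of the walk repeated below; assertions elided. */
u64 offset = 0;

while (more_extents_expected) {
        struct extent_map *em;

        em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em))
                break;                  /* the test reports and bails here */
        /* verify em->block_start, em->start, em->len, em->flags ... */
        offset = em->start + em->len;   /* step to the next mapping */
        free_extent_map(em);
}
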
@@ -344,7 +353,7 @@ static noinline int test_btrfs_get_extent(void)
344 offset = em->start + em->len; 353 offset = em->start + em->len;
345 free_extent_map(em); 354 free_extent_map(em);
346 355
347 em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); 356 em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
348 if (IS_ERR(em)) { 357 if (IS_ERR(em)) {
349 test_msg("Got an error when we shouldn't have\n"); 358 test_msg("Got an error when we shouldn't have\n");
350 goto out; 359 goto out;
@@ -366,7 +375,7 @@ static noinline int test_btrfs_get_extent(void)
366 free_extent_map(em); 375 free_extent_map(em);
367 376
368 /* Regular extent */ 377 /* Regular extent */
369 em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); 378 em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
370 if (IS_ERR(em)) { 379 if (IS_ERR(em)) {
371 test_msg("Got an error when we shouldn't have\n"); 380 test_msg("Got an error when we shouldn't have\n");
372 goto out; 381 goto out;
@@ -375,7 +384,7 @@ static noinline int test_btrfs_get_extent(void)
375 test_msg("Expected a real extent, got %llu\n", em->block_start); 384 test_msg("Expected a real extent, got %llu\n", em->block_start);
376 goto out; 385 goto out;
377 } 386 }
378 if (em->start != offset || em->len != 4095) { 387 if (em->start != offset || em->len != sectorsize - 1) {
379 test_msg("Unexpected extent wanted start %llu len 4095, got " 388 test_msg("Unexpected extent wanted start %llu len 4095, got "
380 "start %llu len %llu\n", offset, em->start, em->len); 389 "start %llu len %llu\n", offset, em->start, em->len);
381 goto out; 390 goto out;
@@ -393,7 +402,7 @@ static noinline int test_btrfs_get_extent(void)
393 free_extent_map(em); 402 free_extent_map(em);
394 403
395 /* The next 3 are split extents */ 404 /* The next 3 are split extents */
396 em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); 405 em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
397 if (IS_ERR(em)) { 406 if (IS_ERR(em)) {
398 test_msg("Got an error when we shouldn't have\n"); 407 test_msg("Got an error when we shouldn't have\n");
399 goto out; 408 goto out;
@@ -402,9 +411,10 @@ static noinline int test_btrfs_get_extent(void)
402 test_msg("Expected a real extent, got %llu\n", em->block_start); 411 test_msg("Expected a real extent, got %llu\n", em->block_start);
403 goto out; 412 goto out;
404 } 413 }
405 if (em->start != offset || em->len != 4096) { 414 if (em->start != offset || em->len != sectorsize) {
406 test_msg("Unexpected extent wanted start %llu len 4096, got " 415 test_msg("Unexpected extent start %llu len %u, "
407 "start %llu len %llu\n", offset, em->start, em->len); 416 "got start %llu len %llu\n",
417 offset, sectorsize, em->start, em->len);
408 goto out; 418 goto out;
409 } 419 }
410 if (em->flags != 0) { 420 if (em->flags != 0) {
@@ -421,7 +431,7 @@ static noinline int test_btrfs_get_extent(void)
421 offset = em->start + em->len; 431 offset = em->start + em->len;
422 free_extent_map(em); 432 free_extent_map(em);
423 433
424 em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); 434 em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
425 if (IS_ERR(em)) { 435 if (IS_ERR(em)) {
426 test_msg("Got an error when we shouldn't have\n"); 436 test_msg("Got an error when we shouldn't have\n");
427 goto out; 437 goto out;
@@ -430,9 +440,10 @@ static noinline int test_btrfs_get_extent(void)
430 test_msg("Expected a hole, got %llu\n", em->block_start); 440 test_msg("Expected a hole, got %llu\n", em->block_start);
431 goto out; 441 goto out;
432 } 442 }
433 if (em->start != offset || em->len != 4096) { 443 if (em->start != offset || em->len != sectorsize) {
434 test_msg("Unexpected extent wanted start %llu len 4096, got " 444 test_msg("Unexpected extent wanted start %llu len %u, "
435 "start %llu len %llu\n", offset, em->start, em->len); 445 "got start %llu len %llu\n",
446 offset, sectorsize, em->start, em->len);
436 goto out; 447 goto out;
437 } 448 }
438 if (em->flags != 0) { 449 if (em->flags != 0) {
@@ -442,7 +453,7 @@ static noinline int test_btrfs_get_extent(void)
442 offset = em->start + em->len; 453 offset = em->start + em->len;
443 free_extent_map(em); 454 free_extent_map(em);
444 455
445 em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); 456 em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
446 if (IS_ERR(em)) { 457 if (IS_ERR(em)) {
447 test_msg("Got an error when we shouldn't have\n"); 458 test_msg("Got an error when we shouldn't have\n");
448 goto out; 459 goto out;
@@ -451,9 +462,10 @@ static noinline int test_btrfs_get_extent(void)
451 test_msg("Expected a real extent, got %llu\n", em->block_start); 462 test_msg("Expected a real extent, got %llu\n", em->block_start);
452 goto out; 463 goto out;
453 } 464 }
454 if (em->start != offset || em->len != 8192) { 465 if (em->start != offset || em->len != 2 * sectorsize) {
455 test_msg("Unexpected extent wanted start %llu len 8192, got " 466 test_msg("Unexpected extent wanted start %llu len %u, "
456 "start %llu len %llu\n", offset, em->start, em->len); 467 "got start %llu len %llu\n",
468 offset, 2 * sectorsize, em->start, em->len);
457 goto out; 469 goto out;
458 } 470 }
459 if (em->flags != 0) { 471 if (em->flags != 0) {
@@ -475,7 +487,7 @@ static noinline int test_btrfs_get_extent(void)
475 free_extent_map(em); 487 free_extent_map(em);
476 488
477 /* Prealloc extent */ 489 /* Prealloc extent */
478 em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); 490 em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
479 if (IS_ERR(em)) { 491 if (IS_ERR(em)) {
480 test_msg("Got an error when we shouldn't have\n"); 492 test_msg("Got an error when we shouldn't have\n");
481 goto out; 493 goto out;
@@ -484,9 +496,10 @@ static noinline int test_btrfs_get_extent(void)
484 test_msg("Expected a real extent, got %llu\n", em->block_start); 496 test_msg("Expected a real extent, got %llu\n", em->block_start);
485 goto out; 497 goto out;
486 } 498 }
487 if (em->start != offset || em->len != 4096) { 499 if (em->start != offset || em->len != sectorsize) {
488 test_msg("Unexpected extent wanted start %llu len 4096, got " 500 test_msg("Unexpected extent wanted start %llu len %u, "
489 "start %llu len %llu\n", offset, em->start, em->len); 501 "got start %llu len %llu\n",
502 offset, sectorsize, em->start, em->len);
490 goto out; 503 goto out;
491 } 504 }
492 if (em->flags != prealloc_only) { 505 if (em->flags != prealloc_only) {
@@ -503,7 +516,7 @@ static noinline int test_btrfs_get_extent(void)
503 free_extent_map(em); 516 free_extent_map(em);
504 517
505 /* The next 3 are a half written prealloc extent */ 518 /* The next 3 are a half written prealloc extent */
506 em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); 519 em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
507 if (IS_ERR(em)) { 520 if (IS_ERR(em)) {
508 test_msg("Got an error when we shouldn't have\n"); 521 test_msg("Got an error when we shouldn't have\n");
509 goto out; 522 goto out;
@@ -512,9 +525,10 @@ static noinline int test_btrfs_get_extent(void)
512 test_msg("Expected a real extent, got %llu\n", em->block_start); 525 test_msg("Expected a real extent, got %llu\n", em->block_start);
513 goto out; 526 goto out;
514 } 527 }
515 if (em->start != offset || em->len != 4096) { 528 if (em->start != offset || em->len != sectorsize) {
516 test_msg("Unexpected extent wanted start %llu len 4096, got " 529 test_msg("Unexpected extent wanted start %llu len %u, "
517 "start %llu len %llu\n", offset, em->start, em->len); 530 "got start %llu len %llu\n",
531 offset, sectorsize, em->start, em->len);
518 goto out; 532 goto out;
519 } 533 }
520 if (em->flags != prealloc_only) { 534 if (em->flags != prealloc_only) {
@@ -532,7 +546,7 @@ static noinline int test_btrfs_get_extent(void)
532 offset = em->start + em->len; 546 offset = em->start + em->len;
533 free_extent_map(em); 547 free_extent_map(em);
534 548
535 em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); 549 em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
536 if (IS_ERR(em)) { 550 if (IS_ERR(em)) {
537 test_msg("Got an error when we shouldn't have\n"); 551 test_msg("Got an error when we shouldn't have\n");
538 goto out; 552 goto out;
@@ -541,9 +555,10 @@ static noinline int test_btrfs_get_extent(void)
541 test_msg("Expected a real extent, got %llu\n", em->block_start); 555 test_msg("Expected a real extent, got %llu\n", em->block_start);
542 goto out; 556 goto out;
543 } 557 }
544 if (em->start != offset || em->len != 4096) { 558 if (em->start != offset || em->len != sectorsize) {
545 test_msg("Unexpected extent wanted start %llu len 4096, got " 559 test_msg("Unexpected extent wanted start %llu len %u, "
546 "start %llu len %llu\n", offset, em->start, em->len); 560 "got start %llu len %llu\n",
561 offset, sectorsize, em->start, em->len);
547 goto out; 562 goto out;
548 } 563 }
549 if (em->flags != 0) { 564 if (em->flags != 0) {
@@ -564,7 +579,7 @@ static noinline int test_btrfs_get_extent(void)
564 offset = em->start + em->len; 579 offset = em->start + em->len;
565 free_extent_map(em); 580 free_extent_map(em);
566 581
567 em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); 582 em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
568 if (IS_ERR(em)) { 583 if (IS_ERR(em)) {
569 test_msg("Got an error when we shouldn't have\n"); 584 test_msg("Got an error when we shouldn't have\n");
570 goto out; 585 goto out;
@@ -573,9 +588,10 @@ static noinline int test_btrfs_get_extent(void)
573 test_msg("Expected a real extent, got %llu\n", em->block_start); 588 test_msg("Expected a real extent, got %llu\n", em->block_start);
574 goto out; 589 goto out;
575 } 590 }
576 if (em->start != offset || em->len != 8192) { 591 if (em->start != offset || em->len != 2 * sectorsize) {
577 test_msg("Unexpected extent wanted start %llu len 8192, got " 592 test_msg("Unexpected extent wanted start %llu len %u, "
578 "start %llu len %llu\n", offset, em->start, em->len); 593 "got start %llu len %llu\n",
594 offset, 2 * sectorsize, em->start, em->len);
579 goto out; 595 goto out;
580 } 596 }
581 if (em->flags != prealloc_only) { 597 if (em->flags != prealloc_only) {
@@ -598,7 +614,7 @@ static noinline int test_btrfs_get_extent(void)
598 free_extent_map(em); 614 free_extent_map(em);
599 615
600 /* Now for the compressed extent */ 616 /* Now for the compressed extent */
601 em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); 617 em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
602 if (IS_ERR(em)) { 618 if (IS_ERR(em)) {
603 test_msg("Got an error when we shouldn't have\n"); 619 test_msg("Got an error when we shouldn't have\n");
604 goto out; 620 goto out;
@@ -607,9 +623,10 @@ static noinline int test_btrfs_get_extent(void)
607 test_msg("Expected a real extent, got %llu\n", em->block_start); 623 test_msg("Expected a real extent, got %llu\n", em->block_start);
608 goto out; 624 goto out;
609 } 625 }
610 if (em->start != offset || em->len != 8192) { 626 if (em->start != offset || em->len != 2 * sectorsize) {
611 test_msg("Unexpected extent wanted start %llu len 8192, got " 627 test_msg("Unexpected extent wanted start %llu len %u,"
612 "start %llu len %llu\n", offset, em->start, em->len); 628 "got start %llu len %llu\n",
629 offset, 2 * sectorsize, em->start, em->len);
613 goto out; 630 goto out;
614 } 631 }
615 if (em->flags != compressed_only) { 632 if (em->flags != compressed_only) {
@@ -631,7 +648,7 @@ static noinline int test_btrfs_get_extent(void)
631 free_extent_map(em); 648 free_extent_map(em);
632 649
633 /* Split compressed extent */ 650 /* Split compressed extent */
634 em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); 651 em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
635 if (IS_ERR(em)) { 652 if (IS_ERR(em)) {
636 test_msg("Got an error when we shouldn't have\n"); 653 test_msg("Got an error when we shouldn't have\n");
637 goto out; 654 goto out;
@@ -640,9 +657,10 @@ static noinline int test_btrfs_get_extent(void)
640 test_msg("Expected a real extent, got %llu\n", em->block_start); 657 test_msg("Expected a real extent, got %llu\n", em->block_start);
641 goto out; 658 goto out;
642 } 659 }
643 if (em->start != offset || em->len != 4096) { 660 if (em->start != offset || em->len != sectorsize) {
644 test_msg("Unexpected extent wanted start %llu len 4096, got " 661 test_msg("Unexpected extent wanted start %llu len %u,"
645 "start %llu len %llu\n", offset, em->start, em->len); 662 "got start %llu len %llu\n",
663 offset, sectorsize, em->start, em->len);
646 goto out; 664 goto out;
647 } 665 }
648 if (em->flags != compressed_only) { 666 if (em->flags != compressed_only) {
@@ -665,7 +683,7 @@ static noinline int test_btrfs_get_extent(void)
665 offset = em->start + em->len; 683 offset = em->start + em->len;
666 free_extent_map(em); 684 free_extent_map(em);
667 685
668 em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); 686 em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
669 if (IS_ERR(em)) { 687 if (IS_ERR(em)) {
670 test_msg("Got an error when we shouldn't have\n"); 688 test_msg("Got an error when we shouldn't have\n");
671 goto out; 689 goto out;
@@ -674,9 +692,10 @@ static noinline int test_btrfs_get_extent(void)
674 test_msg("Expected a real extent, got %llu\n", em->block_start); 692 test_msg("Expected a real extent, got %llu\n", em->block_start);
675 goto out; 693 goto out;
676 } 694 }
677 if (em->start != offset || em->len != 4096) { 695 if (em->start != offset || em->len != sectorsize) {
678 test_msg("Unexpected extent wanted start %llu len 4096, got " 696 test_msg("Unexpected extent wanted start %llu len %u, "
679 "start %llu len %llu\n", offset, em->start, em->len); 697 "got start %llu len %llu\n",
698 offset, sectorsize, em->start, em->len);
680 goto out; 699 goto out;
681 } 700 }
682 if (em->flags != 0) { 701 if (em->flags != 0) {
@@ -691,7 +710,7 @@ static noinline int test_btrfs_get_extent(void)
691 offset = em->start + em->len; 710 offset = em->start + em->len;
692 free_extent_map(em); 711 free_extent_map(em);
693 712
694 em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); 713 em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
695 if (IS_ERR(em)) { 714 if (IS_ERR(em)) {
696 test_msg("Got an error when we shouldn't have\n"); 715 test_msg("Got an error when we shouldn't have\n");
697 goto out; 716 goto out;
@@ -701,9 +720,10 @@ static noinline int test_btrfs_get_extent(void)
701 disk_bytenr, em->block_start); 720 disk_bytenr, em->block_start);
702 goto out; 721 goto out;
703 } 722 }
704 if (em->start != offset || em->len != 8192) { 723 if (em->start != offset || em->len != 2 * sectorsize) {
705 test_msg("Unexpected extent wanted start %llu len 8192, got " 724 test_msg("Unexpected extent wanted start %llu len %u, "
706 "start %llu len %llu\n", offset, em->start, em->len); 725 "got start %llu len %llu\n",
726 offset, 2 * sectorsize, em->start, em->len);
707 goto out; 727 goto out;
708 } 728 }
709 if (em->flags != compressed_only) { 729 if (em->flags != compressed_only) {
@@ -725,7 +745,7 @@ static noinline int test_btrfs_get_extent(void)
725 free_extent_map(em); 745 free_extent_map(em);
726 746
727 /* A hole between regular extents but no hole extent */ 747 /* A hole between regular extents but no hole extent */
728 em = btrfs_get_extent(inode, NULL, 0, offset + 6, 4096, 0); 748 em = btrfs_get_extent(inode, NULL, 0, offset + 6, sectorsize, 0);
729 if (IS_ERR(em)) { 749 if (IS_ERR(em)) {
730 test_msg("Got an error when we shouldn't have\n"); 750 test_msg("Got an error when we shouldn't have\n");
731 goto out; 751 goto out;
@@ -734,9 +754,10 @@ static noinline int test_btrfs_get_extent(void)
734 test_msg("Expected a real extent, got %llu\n", em->block_start); 754 test_msg("Expected a real extent, got %llu\n", em->block_start);
735 goto out; 755 goto out;
736 } 756 }
737 if (em->start != offset || em->len != 4096) { 757 if (em->start != offset || em->len != sectorsize) {
738 test_msg("Unexpected extent wanted start %llu len 4096, got " 758 test_msg("Unexpected extent wanted start %llu len %u, "
739 "start %llu len %llu\n", offset, em->start, em->len); 759 "got start %llu len %llu\n",
760 offset, sectorsize, em->start, em->len);
740 goto out; 761 goto out;
741 } 762 }
742 if (em->flags != 0) { 763 if (em->flags != 0) {
@@ -765,9 +786,10 @@ static noinline int test_btrfs_get_extent(void)
765 * length of the actual hole; if this changes we'll have to change this 786 * length of the actual hole; if this changes we'll have to change this
766 * test. 787 * test.
767 */ 788 */
768 if (em->start != offset || em->len != 12288) { 789 if (em->start != offset || em->len != 3 * sectorsize) {
769 test_msg("Unexpected extent wanted start %llu len 12288, got " 790 test_msg("Unexpected extent wanted start %llu len %u, "
770 "start %llu len %llu\n", offset, em->start, em->len); 791 "got start %llu len %llu\n",
792 offset, 3 * sectorsize, em->start, em->len);
771 goto out; 793 goto out;
772 } 794 }
773 if (em->flags != vacancy_only) { 795 if (em->flags != vacancy_only) {
@@ -783,7 +805,7 @@ static noinline int test_btrfs_get_extent(void)
783 offset = em->start + em->len; 805 offset = em->start + em->len;
784 free_extent_map(em); 806 free_extent_map(em);
785 807
786 em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); 808 em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
787 if (IS_ERR(em)) { 809 if (IS_ERR(em)) {
788 test_msg("Got an error when we shouldn't have\n"); 810 test_msg("Got an error when we shouldn't have\n");
789 goto out; 811 goto out;
@@ -792,9 +814,10 @@ static noinline int test_btrfs_get_extent(void)
792 test_msg("Expected a real extent, got %llu\n", em->block_start); 814 test_msg("Expected a real extent, got %llu\n", em->block_start);
793 goto out; 815 goto out;
794 } 816 }
795 if (em->start != offset || em->len != 4096) { 817 if (em->start != offset || em->len != sectorsize) {
796 test_msg("Unexpected extent wanted start %llu len 4096, got " 818 test_msg("Unexpected extent wanted start %llu len %u,"
797 "start %llu len %llu\n", offset, em->start, em->len); 819 "got start %llu len %llu\n",
820 offset, sectorsize, em->start, em->len);
798 goto out; 821 goto out;
799 } 822 }
800 if (em->flags != 0) { 823 if (em->flags != 0) {
@@ -815,7 +838,7 @@ out:
815 return ret; 838 return ret;
816} 839}
817 840
818static int test_hole_first(void) 841static int test_hole_first(u32 sectorsize, u32 nodesize)
819{ 842{
820 struct inode *inode = NULL; 843 struct inode *inode = NULL;
821 struct btrfs_root *root = NULL; 844 struct btrfs_root *root = NULL;
@@ -832,7 +855,7 @@ static int test_hole_first(void)
832 BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID; 855 BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
833 BTRFS_I(inode)->location.offset = 0; 856 BTRFS_I(inode)->location.offset = 0;
834 857
835 root = btrfs_alloc_dummy_root(); 858 root = btrfs_alloc_dummy_root(sectorsize, nodesize);
836 if (IS_ERR(root)) { 859 if (IS_ERR(root)) {
837 test_msg("Couldn't allocate root\n"); 860 test_msg("Couldn't allocate root\n");
838 goto out; 861 goto out;
@@ -844,7 +867,7 @@ static int test_hole_first(void)
844 goto out; 867 goto out;
845 } 868 }
846 869
847 root->node = alloc_dummy_extent_buffer(NULL, 4096); 870 root->node = alloc_dummy_extent_buffer(NULL, nodesize, nodesize);
848 if (!root->node) { 871 if (!root->node) {
849 test_msg("Couldn't allocate dummy buffer\n"); 872 test_msg("Couldn't allocate dummy buffer\n");
850 goto out; 873 goto out;
@@ -861,9 +884,9 @@ static int test_hole_first(void)
861 * btrfs_get_extent. 884 * btrfs_get_extent.
862 */ 885 */
863 insert_inode_item_key(root); 886 insert_inode_item_key(root);
864 insert_extent(root, 4096, 4096, 4096, 0, 4096, 4096, 887 insert_extent(root, sectorsize, sectorsize, sectorsize, 0, sectorsize,
865 BTRFS_FILE_EXTENT_REG, 0, 1); 888 sectorsize, BTRFS_FILE_EXTENT_REG, 0, 1);
866 em = btrfs_get_extent(inode, NULL, 0, 0, 8192, 0); 889 em = btrfs_get_extent(inode, NULL, 0, 0, 2 * sectorsize, 0);
867 if (IS_ERR(em)) { 890 if (IS_ERR(em)) {
868 test_msg("Got an error when we shouldn't have\n"); 891 test_msg("Got an error when we shouldn't have\n");
869 goto out; 892 goto out;
@@ -872,9 +895,10 @@ static int test_hole_first(void)
872 test_msg("Expected a hole, got %llu\n", em->block_start); 895 test_msg("Expected a hole, got %llu\n", em->block_start);
873 goto out; 896 goto out;
874 } 897 }
875 if (em->start != 0 || em->len != 4096) { 898 if (em->start != 0 || em->len != sectorsize) {
876 test_msg("Unexpected extent wanted start 0 len 4096, got start " 899 test_msg("Unexpected extent wanted start 0 len %u, "
877 "%llu len %llu\n", em->start, em->len); 900 "got start %llu len %llu\n",
901 sectorsize, em->start, em->len);
878 goto out; 902 goto out;
879 } 903 }
880 if (em->flags != vacancy_only) { 904 if (em->flags != vacancy_only) {
@@ -884,18 +908,19 @@ static int test_hole_first(void)
884 } 908 }
885 free_extent_map(em); 909 free_extent_map(em);
886 910
887 em = btrfs_get_extent(inode, NULL, 0, 4096, 8192, 0); 911 em = btrfs_get_extent(inode, NULL, 0, sectorsize, 2 * sectorsize, 0);
888 if (IS_ERR(em)) { 912 if (IS_ERR(em)) {
889 test_msg("Got an error when we shouldn't have\n"); 913 test_msg("Got an error when we shouldn't have\n");
890 goto out; 914 goto out;
891 } 915 }
892 if (em->block_start != 4096) { 916 if (em->block_start != sectorsize) {
893 test_msg("Expected a real extent, got %llu\n", em->block_start); 917 test_msg("Expected a real extent, got %llu\n", em->block_start);
894 goto out; 918 goto out;
895 } 919 }
896 if (em->start != 4096 || em->len != 4096) { 920 if (em->start != sectorsize || em->len != sectorsize) {
897 test_msg("Unexpected extent wanted start 4096 len 4096, got " 921 test_msg("Unexpected extent wanted start %u len %u, "
898 "start %llu len %llu\n", em->start, em->len); 922 "got start %llu len %llu\n",
923 sectorsize, sectorsize, em->start, em->len);
899 goto out; 924 goto out;
900 } 925 }
901 if (em->flags != 0) { 926 if (em->flags != 0) {
@@ -912,7 +937,7 @@ out:
912 return ret; 937 return ret;
913} 938}
914 939
915static int test_extent_accounting(void) 940static int test_extent_accounting(u32 sectorsize, u32 nodesize)
916{ 941{
917 struct inode *inode = NULL; 942 struct inode *inode = NULL;
918 struct btrfs_root *root = NULL; 943 struct btrfs_root *root = NULL;
@@ -924,7 +949,7 @@ static int test_extent_accounting(void)
924 return ret; 949 return ret;
925 } 950 }
926 951
927 root = btrfs_alloc_dummy_root(); 952 root = btrfs_alloc_dummy_root(sectorsize, nodesize);
928 if (IS_ERR(root)) { 953 if (IS_ERR(root)) {
929 test_msg("Couldn't allocate root\n"); 954 test_msg("Couldn't allocate root\n");
930 goto out; 955 goto out;
@@ -954,10 +979,11 @@ static int test_extent_accounting(void)
954 goto out; 979 goto out;
955 } 980 }
956 981
957 /* [BTRFS_MAX_EXTENT_SIZE][4k] */ 982 /* [BTRFS_MAX_EXTENT_SIZE][sectorsize] */
958 BTRFS_I(inode)->outstanding_extents++; 983 BTRFS_I(inode)->outstanding_extents++;
959 ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE, 984 ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE,
960 BTRFS_MAX_EXTENT_SIZE + 4095, NULL); 985 BTRFS_MAX_EXTENT_SIZE + sectorsize - 1,
986 NULL);
961 if (ret) { 987 if (ret) {
962 test_msg("btrfs_set_extent_delalloc returned %d\n", ret); 988 test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
963 goto out; 989 goto out;
@@ -969,10 +995,10 @@ static int test_extent_accounting(void)
969 goto out; 995 goto out;
970 } 996 }
971 997
972 /* [BTRFS_MAX_EXTENT_SIZE/2][4K HOLE][the rest] */ 998 /* [BTRFS_MAX_EXTENT_SIZE/2][sectorsize HOLE][the rest] */
973 ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, 999 ret = clear_extent_bit(&BTRFS_I(inode)->io_tree,
974 BTRFS_MAX_EXTENT_SIZE >> 1, 1000 BTRFS_MAX_EXTENT_SIZE >> 1,
975 (BTRFS_MAX_EXTENT_SIZE >> 1) + 4095, 1001 (BTRFS_MAX_EXTENT_SIZE >> 1) + sectorsize - 1,
976 EXTENT_DELALLOC | EXTENT_DIRTY | 1002 EXTENT_DELALLOC | EXTENT_DIRTY |
977 EXTENT_UPTODATE | EXTENT_DO_ACCOUNTING, 0, 0, 1003 EXTENT_UPTODATE | EXTENT_DO_ACCOUNTING, 0, 0,
978 NULL, GFP_KERNEL); 1004 NULL, GFP_KERNEL);
@@ -987,10 +1013,11 @@ static int test_extent_accounting(void)
987 goto out; 1013 goto out;
988 } 1014 }
989 1015
990 /* [BTRFS_MAX_EXTENT_SIZE][4K] */ 1016 /* [BTRFS_MAX_EXTENT_SIZE][sectorsize] */
991 BTRFS_I(inode)->outstanding_extents++; 1017 BTRFS_I(inode)->outstanding_extents++;
992 ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE >> 1, 1018 ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE >> 1,
993 (BTRFS_MAX_EXTENT_SIZE >> 1) + 4095, 1019 (BTRFS_MAX_EXTENT_SIZE >> 1)
1020 + sectorsize - 1,
994 NULL); 1021 NULL);
995 if (ret) { 1022 if (ret) {
996 test_msg("btrfs_set_extent_delalloc returned %d\n", ret); 1023 test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
@@ -1004,16 +1031,17 @@ static int test_extent_accounting(void)
1004 } 1031 }
1005 1032
1006 /* 1033 /*
1007 * [BTRFS_MAX_EXTENT_SIZE+4K][4K HOLE][BTRFS_MAX_EXTENT_SIZE+4K] 1034 * [BTRFS_MAX_EXTENT_SIZE+sectorsize][sectorsize HOLE][BTRFS_MAX_EXTENT_SIZE+sectorsize]
1008 * 1035 *
1009 * I'm artificially adding 2 to outstanding_extents because in the 1036 * I'm artificially adding 2 to outstanding_extents because in the
1010 * buffered IO case we'd add things up as we go, but I don't feel like 1037 * buffered IO case we'd add things up as we go, but I don't feel like
1011 * doing that here, this isn't the interesting case we want to test. 1038 * doing that here, this isn't the interesting case we want to test.
1012 */ 1039 */
1013 BTRFS_I(inode)->outstanding_extents += 2; 1040 BTRFS_I(inode)->outstanding_extents += 2;
1014 ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE + 8192, 1041 ret = btrfs_set_extent_delalloc(inode,
1015 (BTRFS_MAX_EXTENT_SIZE << 1) + 12287, 1042 BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize,
1016 NULL); 1043 (BTRFS_MAX_EXTENT_SIZE << 1) + 3 * sectorsize - 1,
1044 NULL);
1017 if (ret) { 1045 if (ret) {
1018 test_msg("btrfs_set_extent_delalloc returned %d\n", ret); 1046 test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
1019 goto out; 1047 goto out;
@@ -1025,10 +1053,13 @@ static int test_extent_accounting(void)
1025 goto out; 1053 goto out;
1026 } 1054 }
1027 1055
1028 /* [BTRFS_MAX_EXTENT_SIZE+4k][4k][BTRFS_MAX_EXTENT_SIZE+4k] */ 1056 /*
1057 * [BTRFS_MAX_EXTENT_SIZE+sectorsize][sectorsize][BTRFS_MAX_EXTENT_SIZE+sectorsize]
1058 */
1029 BTRFS_I(inode)->outstanding_extents++; 1059 BTRFS_I(inode)->outstanding_extents++;
1030 ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE+4096, 1060 ret = btrfs_set_extent_delalloc(inode,
1031 BTRFS_MAX_EXTENT_SIZE+8191, NULL); 1061 BTRFS_MAX_EXTENT_SIZE + sectorsize,
1062 BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL);
1032 if (ret) { 1063 if (ret) {
1033 test_msg("btrfs_set_extent_delalloc returned %d\n", ret); 1064 test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
1034 goto out; 1065 goto out;
@@ -1042,8 +1073,8 @@ static int test_extent_accounting(void)
1042 1073
1043 /* [BTRFS_MAX_EXTENT_SIZE+4k][4K HOLE][BTRFS_MAX_EXTENT_SIZE+4k] */ 1074 /* [BTRFS_MAX_EXTENT_SIZE+sectorsize][sectorsize HOLE][BTRFS_MAX_EXTENT_SIZE+sectorsize] */
1044 ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, 1075 ret = clear_extent_bit(&BTRFS_I(inode)->io_tree,
1045 BTRFS_MAX_EXTENT_SIZE+4096, 1076 BTRFS_MAX_EXTENT_SIZE + sectorsize,
1046 BTRFS_MAX_EXTENT_SIZE+8191, 1077 BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1,
1047 EXTENT_DIRTY | EXTENT_DELALLOC | 1078 EXTENT_DIRTY | EXTENT_DELALLOC |
1048 EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0, 1079 EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0,
1049 NULL, GFP_KERNEL); 1080 NULL, GFP_KERNEL);
@@ -1063,8 +1094,9 @@ static int test_extent_accounting(void)
1063 * might fail and I'd rather satisfy my paranoia at this point. 1094 * might fail and I'd rather satisfy my paranoia at this point.
1064 */ 1095 */
1065 BTRFS_I(inode)->outstanding_extents++; 1096 BTRFS_I(inode)->outstanding_extents++;
1066 ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE+4096, 1097 ret = btrfs_set_extent_delalloc(inode,
1067 BTRFS_MAX_EXTENT_SIZE+8191, NULL); 1098 BTRFS_MAX_EXTENT_SIZE + sectorsize,
1099 BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL);
1068 if (ret) { 1100 if (ret) {
1069 test_msg("btrfs_set_extent_delalloc returned %d\n", ret); 1101 test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
1070 goto out; 1102 goto out;
@@ -1103,7 +1135,7 @@ out:
1103 return ret; 1135 return ret;
1104} 1136}
1105 1137
1106int btrfs_test_inodes(void) 1138int btrfs_test_inodes(u32 sectorsize, u32 nodesize)
1107{ 1139{
1108 int ret; 1140 int ret;
1109 1141
@@ -1112,13 +1144,13 @@ int btrfs_test_inodes(void)
1112 set_bit(EXTENT_FLAG_PREALLOC, &prealloc_only); 1144 set_bit(EXTENT_FLAG_PREALLOC, &prealloc_only);
1113 1145
1114 test_msg("Running btrfs_get_extent tests\n"); 1146 test_msg("Running btrfs_get_extent tests\n");
1115 ret = test_btrfs_get_extent(); 1147 ret = test_btrfs_get_extent(sectorsize, nodesize);
1116 if (ret) 1148 if (ret)
1117 return ret; 1149 return ret;
1118 test_msg("Running hole first btrfs_get_extent test\n"); 1150 test_msg("Running hole first btrfs_get_extent test\n");
1119 ret = test_hole_first(); 1151 ret = test_hole_first(sectorsize, nodesize);
1120 if (ret) 1152 if (ret)
1121 return ret; 1153 return ret;
1122 test_msg("Running outstanding_extents tests\n"); 1154 test_msg("Running outstanding_extents tests\n");
1123 return test_extent_accounting(); 1155 return test_extent_accounting(sectorsize, nodesize);
1124} 1156}
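
test_extent_accounting() above relies on one rule: a delalloc range is charged one outstanding extent per BTRFS_MAX_EXTENT_SIZE chunk, rounded up, which is why its endpoints are all written as a multiple of sectorsize minus one. A standalone sketch of the rule; the 128 MiB value for BTRFS_MAX_EXTENT_SIZE is an assumption for illustration:

#include <stdio.h>

#define MAX_EXTENT_SIZE (128ULL << 20)  /* assumed BTRFS_MAX_EXTENT_SIZE */

/* One outstanding extent per MAX_EXTENT_SIZE chunk, rounded up. */
static unsigned long long outstanding(unsigned long long start,
                                      unsigned long long end)
{
        unsigned long long len = end - start + 1;

        return (len + MAX_EXTENT_SIZE - 1) / MAX_EXTENT_SIZE;
}

int main(void)
{
        unsigned long long sectorsize = 4096;

        /* [0, MAX_EXTENT_SIZE - 1]: exactly one extent. */
        printf("%llu\n", outstanding(0, MAX_EXTENT_SIZE - 1));
        /* One extra sector pushes the count to two, which is what the
         * "[BTRFS_MAX_EXTENT_SIZE][sectorsize]" case above asserts. */
        printf("%llu\n", outstanding(0, MAX_EXTENT_SIZE + sectorsize - 1));
        return 0;
}
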
diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
index 8aa4ded31326..57a12c0d680b 100644
--- a/fs/btrfs/tests/qgroup-tests.c
+++ b/fs/btrfs/tests/qgroup-tests.c
@@ -16,6 +16,7 @@
16 * Boston, MA 021110-1307, USA. 16 * Boston, MA 021110-1307, USA.
17 */ 17 */
18 18
19#include <linux/types.h>
19#include "btrfs-tests.h" 20#include "btrfs-tests.h"
20#include "../ctree.h" 21#include "../ctree.h"
21#include "../transaction.h" 22#include "../transaction.h"
@@ -216,7 +217,8 @@ static int remove_extent_ref(struct btrfs_root *root, u64 bytenr,
216 return ret; 217 return ret;
217} 218}
218 219
219static int test_no_shared_qgroup(struct btrfs_root *root) 220static int test_no_shared_qgroup(struct btrfs_root *root,
221 u32 sectorsize, u32 nodesize)
220{ 222{
221 struct btrfs_trans_handle trans; 223 struct btrfs_trans_handle trans;
222 struct btrfs_fs_info *fs_info = root->fs_info; 224 struct btrfs_fs_info *fs_info = root->fs_info;
@@ -227,7 +229,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root)
227 btrfs_init_dummy_trans(&trans); 229 btrfs_init_dummy_trans(&trans);
228 230
229 test_msg("Qgroup basic add\n"); 231 test_msg("Qgroup basic add\n");
230 ret = btrfs_create_qgroup(NULL, fs_info, 5); 232 ret = btrfs_create_qgroup(NULL, fs_info, BTRFS_FS_TREE_OBJECTID);
231 if (ret) { 233 if (ret) {
232 test_msg("Couldn't create a qgroup %d\n", ret); 234 test_msg("Couldn't create a qgroup %d\n", ret);
233 return ret; 235 return ret;
@@ -238,18 +240,19 @@ static int test_no_shared_qgroup(struct btrfs_root *root)
238 * we can only call btrfs_qgroup_account_extent() directly to test 240 * we can only call btrfs_qgroup_account_extent() directly to test
239 * quota. 241 * quota.
240 */ 242 */
241 ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots); 243 ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
242 if (ret) { 244 if (ret) {
243 ulist_free(old_roots); 245 ulist_free(old_roots);
244 test_msg("Couldn't find old roots: %d\n", ret); 246 test_msg("Couldn't find old roots: %d\n", ret);
245 return ret; 247 return ret;
246 } 248 }
247 249
248 ret = insert_normal_tree_ref(root, 4096, 4096, 0, 5); 250 ret = insert_normal_tree_ref(root, nodesize, nodesize, 0,
251 BTRFS_FS_TREE_OBJECTID);
249 if (ret) 252 if (ret)
250 return ret; 253 return ret;
251 254
252 ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots); 255 ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
253 if (ret) { 256 if (ret) {
254 ulist_free(old_roots); 257 ulist_free(old_roots);
255 ulist_free(new_roots); 258 ulist_free(new_roots);
@@ -257,32 +260,33 @@ static int test_no_shared_qgroup(struct btrfs_root *root)
257 return ret; 260 return ret;
258 } 261 }
259 262
260 ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096, 263 ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
261 old_roots, new_roots); 264 nodesize, old_roots, new_roots);
262 if (ret) { 265 if (ret) {
263 test_msg("Couldn't account space for a qgroup %d\n", ret); 266 test_msg("Couldn't account space for a qgroup %d\n", ret);
264 return ret; 267 return ret;
265 } 268 }
266 269
267 if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 4096)) { 270 if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID,
271 nodesize, nodesize)) {
268 test_msg("Qgroup counts didn't match expected values\n"); 272 test_msg("Qgroup counts didn't match expected values\n");
269 return -EINVAL; 273 return -EINVAL;
270 } 274 }
271 old_roots = NULL; 275 old_roots = NULL;
272 new_roots = NULL; 276 new_roots = NULL;
273 277
274 ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots); 278 ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
275 if (ret) { 279 if (ret) {
276 ulist_free(old_roots); 280 ulist_free(old_roots);
277 test_msg("Couldn't find old roots: %d\n", ret); 281 test_msg("Couldn't find old roots: %d\n", ret);
278 return ret; 282 return ret;
279 } 283 }
280 284
281 ret = remove_extent_item(root, 4096, 4096); 285 ret = remove_extent_item(root, nodesize, nodesize);
282 if (ret) 286 if (ret)
283 return -EINVAL; 287 return -EINVAL;
284 288
285 ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots); 289 ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
286 if (ret) { 290 if (ret) {
287 ulist_free(old_roots); 291 ulist_free(old_roots);
288 ulist_free(new_roots); 292 ulist_free(new_roots);
@@ -290,14 +294,14 @@ static int test_no_shared_qgroup(struct btrfs_root *root)
290 return ret; 294 return ret;
291 } 295 }
292 296
293 ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096, 297 ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
294 old_roots, new_roots); 298 nodesize, old_roots, new_roots);
295 if (ret) { 299 if (ret) {
296 test_msg("Couldn't account space for a qgroup %d\n", ret); 300 test_msg("Couldn't account space for a qgroup %d\n", ret);
297 return -EINVAL; 301 return -EINVAL;
298 } 302 }
299 303
300 if (btrfs_verify_qgroup_counts(fs_info, 5, 0, 0)) { 304 if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID, 0, 0)) {
301 test_msg("Qgroup counts didn't match expected values\n"); 305 test_msg("Qgroup counts didn't match expected values\n");
302 return -EINVAL; 306 return -EINVAL;
303 } 307 }
@@ -310,7 +314,8 @@ static int test_no_shared_qgroup(struct btrfs_root *root)
310 * right, also remove one of the roots and make sure the exclusive count is 314 * right, also remove one of the roots and make sure the exclusive count is
311 * adjusted properly. 315 * adjusted properly.
312 */ 316 */
313static int test_multiple_refs(struct btrfs_root *root) 317static int test_multiple_refs(struct btrfs_root *root,
318 u32 sectorsize, u32 nodesize)
314{ 319{
315 struct btrfs_trans_handle trans; 320 struct btrfs_trans_handle trans;
316 struct btrfs_fs_info *fs_info = root->fs_info; 321 struct btrfs_fs_info *fs_info = root->fs_info;
@@ -322,25 +327,29 @@ static int test_multiple_refs(struct btrfs_root *root)
322 327
323 test_msg("Qgroup multiple refs test\n"); 328 test_msg("Qgroup multiple refs test\n");
324 329
325 /* We have 5 created already from the previous test */ 330 /*
326 ret = btrfs_create_qgroup(NULL, fs_info, 256); 331 * We have BTRFS_FS_TREE_OBJECTID created already from the
332 * previous test.
333 */
334 ret = btrfs_create_qgroup(NULL, fs_info, BTRFS_FIRST_FREE_OBJECTID);
327 if (ret) { 335 if (ret) {
328 test_msg("Couldn't create a qgroup %d\n", ret); 336 test_msg("Couldn't create a qgroup %d\n", ret);
329 return ret; 337 return ret;
330 } 338 }
331 339
332 ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots); 340 ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
333 if (ret) { 341 if (ret) {
334 ulist_free(old_roots); 342 ulist_free(old_roots);
335 test_msg("Couldn't find old roots: %d\n", ret); 343 test_msg("Couldn't find old roots: %d\n", ret);
336 return ret; 344 return ret;
337 } 345 }
338 346
339 ret = insert_normal_tree_ref(root, 4096, 4096, 0, 5); 347 ret = insert_normal_tree_ref(root, nodesize, nodesize, 0,
348 BTRFS_FS_TREE_OBJECTID);
340 if (ret) 349 if (ret)
341 return ret; 350 return ret;
342 351
343 ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots); 352 ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
344 if (ret) { 353 if (ret) {
345 ulist_free(old_roots); 354 ulist_free(old_roots);
346 ulist_free(new_roots); 355 ulist_free(new_roots);
@@ -348,30 +357,32 @@ static int test_multiple_refs(struct btrfs_root *root)
348 return ret; 357 return ret;
349 } 358 }
350 359
351 ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096, 360 ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
352 old_roots, new_roots); 361 nodesize, old_roots, new_roots);
353 if (ret) { 362 if (ret) {
354 test_msg("Couldn't account space for a qgroup %d\n", ret); 363 test_msg("Couldn't account space for a qgroup %d\n", ret);
355 return ret; 364 return ret;
356 } 365 }
357 366
358 if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 4096)) { 367 if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID,
368 nodesize, nodesize)) {
359 test_msg("Qgroup counts didn't match expected values\n"); 369 test_msg("Qgroup counts didn't match expected values\n");
360 return -EINVAL; 370 return -EINVAL;
361 } 371 }
362 372
363 ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots); 373 ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
364 if (ret) { 374 if (ret) {
365 ulist_free(old_roots); 375 ulist_free(old_roots);
366 test_msg("Couldn't find old roots: %d\n", ret); 376 test_msg("Couldn't find old roots: %d\n", ret);
367 return ret; 377 return ret;
368 } 378 }
369 379
370 ret = add_tree_ref(root, 4096, 4096, 0, 256); 380 ret = add_tree_ref(root, nodesize, nodesize, 0,
381 BTRFS_FIRST_FREE_OBJECTID);
371 if (ret) 382 if (ret)
372 return ret; 383 return ret;
373 384
374 ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots); 385 ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
375 if (ret) { 386 if (ret) {
376 ulist_free(old_roots); 387 ulist_free(old_roots);
377 ulist_free(new_roots); 388 ulist_free(new_roots);
@@ -379,35 +390,38 @@ static int test_multiple_refs(struct btrfs_root *root)
379 return ret; 390 return ret;
380 } 391 }
381 392
382 ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096, 393 ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
383 old_roots, new_roots); 394 nodesize, old_roots, new_roots);
384 if (ret) { 395 if (ret) {
385 test_msg("Couldn't account space for a qgroup %d\n", ret); 396 test_msg("Couldn't account space for a qgroup %d\n", ret);
386 return ret; 397 return ret;
387 } 398 }
388 399
389 if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 0)) { 400 if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID,
401 nodesize, 0)) {
390 test_msg("Qgroup counts didn't match expected values\n"); 402 test_msg("Qgroup counts didn't match expected values\n");
391 return -EINVAL; 403 return -EINVAL;
392 } 404 }
393 405
394 if (btrfs_verify_qgroup_counts(fs_info, 256, 4096, 0)) { 406 if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FIRST_FREE_OBJECTID,
407 nodesize, 0)) {
395 test_msg("Qgroup counts didn't match expected values\n"); 408 test_msg("Qgroup counts didn't match expected values\n");
396 return -EINVAL; 409 return -EINVAL;
397 } 410 }
398 411
399 ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots); 412 ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
400 if (ret) { 413 if (ret) {
401 ulist_free(old_roots); 414 ulist_free(old_roots);
402 test_msg("Couldn't find old roots: %d\n", ret); 415 test_msg("Couldn't find old roots: %d\n", ret);
403 return ret; 416 return ret;
404 } 417 }
405 418
406 ret = remove_extent_ref(root, 4096, 4096, 0, 256); 419 ret = remove_extent_ref(root, nodesize, nodesize, 0,
420 BTRFS_FIRST_FREE_OBJECTID);
407 if (ret) 421 if (ret)
408 return ret; 422 return ret;
409 423
410 ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots); 424 ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
411 if (ret) { 425 if (ret) {
412 ulist_free(old_roots); 426 ulist_free(old_roots);
413 ulist_free(new_roots); 427 ulist_free(new_roots);
@@ -415,19 +429,21 @@ static int test_multiple_refs(struct btrfs_root *root)
415 return ret; 429 return ret;
416 } 430 }
417 431
418 ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096, 432 ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
419 old_roots, new_roots); 433 nodesize, old_roots, new_roots);
420 if (ret) { 434 if (ret) {
421 test_msg("Couldn't account space for a qgroup %d\n", ret); 435 test_msg("Couldn't account space for a qgroup %d\n", ret);
422 return ret; 436 return ret;
423 } 437 }
424 438
425 if (btrfs_verify_qgroup_counts(fs_info, 256, 0, 0)) { 439 if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FIRST_FREE_OBJECTID,
440 0, 0)) {
426 test_msg("Qgroup counts didn't match expected values\n"); 441 test_msg("Qgroup counts didn't match expected values\n");
427 return -EINVAL; 442 return -EINVAL;
428 } 443 }
429 444
430 if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 4096)) { 445 if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID,
446 nodesize, nodesize)) {
431 test_msg("Qgroup counts didn't match expected values\n"); 447 test_msg("Qgroup counts didn't match expected values\n");
432 return -EINVAL; 448 return -EINVAL;
433 } 449 }
@@ -435,13 +451,13 @@ static int test_multiple_refs(struct btrfs_root *root)
435 return 0; 451 return 0;
436} 452}
437 453
438int btrfs_test_qgroups(void) 454int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
439{ 455{
440 struct btrfs_root *root; 456 struct btrfs_root *root;
441 struct btrfs_root *tmp_root; 457 struct btrfs_root *tmp_root;
442 int ret = 0; 458 int ret = 0;
443 459
444 root = btrfs_alloc_dummy_root(); 460 root = btrfs_alloc_dummy_root(sectorsize, nodesize);
445 if (IS_ERR(root)) { 461 if (IS_ERR(root)) {
446 test_msg("Couldn't allocate root\n"); 462 test_msg("Couldn't allocate root\n");
447 return PTR_ERR(root); 463 return PTR_ERR(root);
@@ -468,7 +484,8 @@ int btrfs_test_qgroups(void)
468 * Can't use bytenr 0, some things freak out 484 * Can't use bytenr 0, some things freak out
469 * *cough*backref walking code*cough* 485 * *cough*backref walking code*cough*
470 */ 486 */
471 root->node = alloc_test_extent_buffer(root->fs_info, 4096); 487 root->node = alloc_test_extent_buffer(root->fs_info, nodesize,
488 nodesize);
472 if (!root->node) { 489 if (!root->node) {
473 test_msg("Couldn't allocate dummy buffer\n"); 490 test_msg("Couldn't allocate dummy buffer\n");
474 ret = -ENOMEM; 491 ret = -ENOMEM;
@@ -476,16 +493,16 @@ int btrfs_test_qgroups(void)
476 } 493 }
477 btrfs_set_header_level(root->node, 0); 494 btrfs_set_header_level(root->node, 0);
478 btrfs_set_header_nritems(root->node, 0); 495 btrfs_set_header_nritems(root->node, 0);
479 root->alloc_bytenr += 8192; 496 root->alloc_bytenr += 2 * nodesize;
480 497
481 tmp_root = btrfs_alloc_dummy_root(); 498 tmp_root = btrfs_alloc_dummy_root(sectorsize, nodesize);
482 if (IS_ERR(tmp_root)) { 499 if (IS_ERR(tmp_root)) {
483 test_msg("Couldn't allocate a fs root\n"); 500 test_msg("Couldn't allocate a fs root\n");
484 ret = PTR_ERR(tmp_root); 501 ret = PTR_ERR(tmp_root);
485 goto out; 502 goto out;
486 } 503 }
487 504
488 tmp_root->root_key.objectid = 5; 505 tmp_root->root_key.objectid = BTRFS_FS_TREE_OBJECTID;
489 root->fs_info->fs_root = tmp_root; 506 root->fs_info->fs_root = tmp_root;
490 ret = btrfs_insert_fs_root(root->fs_info, tmp_root); 507 ret = btrfs_insert_fs_root(root->fs_info, tmp_root);
491 if (ret) { 508 if (ret) {
@@ -493,14 +510,14 @@ int btrfs_test_qgroups(void)
493 goto out; 510 goto out;
494 } 511 }
495 512
496 tmp_root = btrfs_alloc_dummy_root(); 513 tmp_root = btrfs_alloc_dummy_root(sectorsize, nodesize);
497 if (IS_ERR(tmp_root)) { 514 if (IS_ERR(tmp_root)) {
498 test_msg("Couldn't allocate a fs root\n"); 515 test_msg("Couldn't allocate a fs root\n");
499 ret = PTR_ERR(tmp_root); 516 ret = PTR_ERR(tmp_root);
500 goto out; 517 goto out;
501 } 518 }
502 519
503 tmp_root->root_key.objectid = 256; 520 tmp_root->root_key.objectid = BTRFS_FIRST_FREE_OBJECTID;
504 ret = btrfs_insert_fs_root(root->fs_info, tmp_root); 521 ret = btrfs_insert_fs_root(root->fs_info, tmp_root);
505 if (ret) { 522 if (ret) {
506 test_msg("Couldn't insert fs root %d\n", ret); 523 test_msg("Couldn't insert fs root %d\n", ret);
@@ -508,10 +525,10 @@ int btrfs_test_qgroups(void)
508 } 525 }
509 526
510 test_msg("Running qgroup tests\n"); 527 test_msg("Running qgroup tests\n");
511 ret = test_no_shared_qgroup(root); 528 ret = test_no_shared_qgroup(root, sectorsize, nodesize);
512 if (ret) 529 if (ret)
513 goto out; 530 goto out;
514 ret = test_multiple_refs(root); 531 ret = test_multiple_refs(root, sectorsize, nodesize);
515out: 532out:
516 btrfs_free_dummy_root(root); 533 btrfs_free_dummy_root(root);
517 return ret; 534 return ret;
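
The hunks above thread sectorsize/nodesize through the qgroup tests in place of the hard-coded 4096/8192 values, and swap the magic root ids 5 and 256 for BTRFS_FS_TREE_OBJECTID and BTRFS_FIRST_FREE_OBJECTID. Below is a minimal userspace sketch of the pattern this enables: sweeping block-size combinations through one parameterized suite. The harness, the size table and run_qgroup_tests() are illustrative stand-ins, not the kernel's actual entry points.

    #include <stdio.h>

    /* Stand-in for btrfs_test_qgroups(sectorsize, nodesize). */
    static int run_qgroup_tests(unsigned int sectorsize, unsigned int nodesize)
    {
            printf("qgroup tests: sectorsize=%u nodesize=%u\n",
                   sectorsize, nodesize);
            return 0;
    }

    int main(void)
    {
            /* Illustrative size list; nodesize must be >= sectorsize. */
            static const unsigned int sizes[] = { 4096, 8192, 16384, 65536 };
            unsigned int i, j;

            for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                    for (j = i; j < sizeof(sizes) / sizeof(sizes[0]); j++)
                            if (run_qgroup_tests(sizes[i], sizes[j]))
                                    return 1;
            return 0;
    }
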
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index f6e24cb423ae..948aa186b353 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -818,6 +818,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
818{ 818{
819 struct btrfs_transaction *cur_trans = trans->transaction; 819 struct btrfs_transaction *cur_trans = trans->transaction;
820 struct btrfs_fs_info *info = root->fs_info; 820 struct btrfs_fs_info *info = root->fs_info;
821 u64 transid = trans->transid;
821 unsigned long cur = trans->delayed_ref_updates; 822 unsigned long cur = trans->delayed_ref_updates;
822 int lock = (trans->type != TRANS_JOIN_NOLOCK); 823 int lock = (trans->type != TRANS_JOIN_NOLOCK);
823 int err = 0; 824 int err = 0;
@@ -905,7 +906,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
905 906
906 kmem_cache_free(btrfs_trans_handle_cachep, trans); 907 kmem_cache_free(btrfs_trans_handle_cachep, trans);
907 if (must_run_delayed_refs) { 908 if (must_run_delayed_refs) {
908 btrfs_async_run_delayed_refs(root, cur, 909 btrfs_async_run_delayed_refs(root, cur, transid,
909 must_run_delayed_refs == 1); 910 must_run_delayed_refs == 1);
910 } 911 }
911 return err; 912 return err;
@@ -1311,11 +1312,6 @@ int btrfs_defrag_root(struct btrfs_root *root)
1311 return ret; 1312 return ret;
1312} 1313}
1313 1314
1314/* Bisectability fixup, remove in 4.8 */
1315#ifndef btrfs_std_error
1316#define btrfs_std_error btrfs_handle_fs_error
1317#endif
1318
1319/* 1315/*
1320 * Do all special snapshot related qgroup dirty hack. 1316 * Do all special snapshot related qgroup dirty hack.
1321 * 1317 *
@@ -1385,7 +1381,7 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
1385 switch_commit_roots(trans->transaction, fs_info); 1381 switch_commit_roots(trans->transaction, fs_info);
1386 ret = btrfs_write_and_wait_transaction(trans, src); 1382 ret = btrfs_write_and_wait_transaction(trans, src);
1387 if (ret) 1383 if (ret)
1388 btrfs_std_error(fs_info, ret, 1384 btrfs_handle_fs_error(fs_info, ret,
1389 "Error while writing out transaction for qgroup"); 1385 "Error while writing out transaction for qgroup");
1390 1386
1391out: 1387out:
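
The __btrfs_end_transaction() hunk snapshots trans->transid into a local before the handle goes back to the slab cache, because btrfs_async_run_delayed_refs() runs after kmem_cache_free() and must not dereference the freed handle. A minimal sketch of that copy-before-free pattern; names and types are illustrative, not the kernel's.

    #include <stdio.h>
    #include <stdlib.h>

    struct handle {
            unsigned long long transid;
    };

    static void async_run_delayed_refs(unsigned long long transid)
    {
            printf("delayed refs for transaction %llu\n", transid);
    }

    static void end_transaction(struct handle *h, int must_run_delayed_refs)
    {
            unsigned long long transid = h->transid;        /* snapshot first */

            free(h);                                        /* handle is gone */
            if (must_run_delayed_refs)
                    async_run_delayed_refs(transid);        /* safe: local copy */
    }

    int main(void)
    {
            struct handle *h = malloc(sizeof(*h));

            if (!h)
                    return 1;
            h->transid = 42;
            end_transaction(h, 1);
            return 0;
    }
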
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 9fe0ec2bf0fe..c5abee4f01ad 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -110,7 +110,6 @@ struct btrfs_trans_handle {
110 u64 chunk_bytes_reserved; 110 u64 chunk_bytes_reserved;
111 unsigned long use_count; 111 unsigned long use_count;
112 unsigned long blocks_reserved; 112 unsigned long blocks_reserved;
113 unsigned long blocks_used;
114 unsigned long delayed_ref_updates; 113 unsigned long delayed_ref_updates;
115 struct btrfs_transaction *transaction; 114 struct btrfs_transaction *transaction;
116 struct btrfs_block_rsv *block_rsv; 115 struct btrfs_block_rsv *block_rsv;
@@ -121,6 +120,7 @@ struct btrfs_trans_handle {
121 bool can_flush_pending_bgs; 120 bool can_flush_pending_bgs;
122 bool reloc_reserved; 121 bool reloc_reserved;
123 bool sync; 122 bool sync;
123 bool dirty;
124 unsigned int type; 124 unsigned int type;
125 /* 125 /*
126 * this root is only needed to validate that the root passed to 126 * this root is only needed to validate that the root passed to
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index b7665af471d8..c05f69a8ec42 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2422,8 +2422,8 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
2422 root_owner = btrfs_header_owner(parent); 2422 root_owner = btrfs_header_owner(parent);
2423 2423
2424 next = btrfs_find_create_tree_block(root, bytenr); 2424 next = btrfs_find_create_tree_block(root, bytenr);
2425 if (!next) 2425 if (IS_ERR(next))
2426 return -ENOMEM; 2426 return PTR_ERR(next);
2427 2427
2428 if (*level == 1) { 2428 if (*level == 1) {
2429 ret = wc->process_func(root, next, wc, ptr_gen); 2429 ret = wc->process_func(root, next, wc, ptr_gen);
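
btrfs_find_create_tree_block() now reports failure through an ERR_PTR-encoded pointer rather than NULL, so the caller's check becomes IS_ERR()/PTR_ERR(). A self-contained sketch of that convention; the helpers below mirror <linux/err.h> in spirit but are reimplemented here purely for illustration.

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    static void *ERR_PTR(long error)
    {
            return (void *)error;
    }

    static long PTR_ERR(const void *ptr)
    {
            return (long)ptr;
    }

    static int IS_ERR(const void *ptr)
    {
            /* errors live in the top page of the address space */
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static void *find_create_block(int simulate_failure)
    {
            static int block;

            return simulate_failure ? ERR_PTR(-ENOMEM) : (void *)&block;
    }

    int main(void)
    {
            void *next = find_create_block(1);

            if (IS_ERR(next)) {             /* a NULL check would miss this */
                    printf("allocation failed: %ld\n", PTR_ERR(next));
                    return 1;
            }
            printf("got block %p\n", next);
            return 0;
    }
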
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index bdc62561ede8..589f128173b1 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -2761,6 +2761,7 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
2761 u64 dev_extent_len = 0; 2761 u64 dev_extent_len = 0;
2762 u64 chunk_objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2762 u64 chunk_objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2763 int i, ret = 0; 2763 int i, ret = 0;
2764 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
2764 2765
2765 /* Just in case */ 2766 /* Just in case */
2766 root = root->fs_info->chunk_root; 2767 root = root->fs_info->chunk_root;
@@ -2787,12 +2788,19 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
2787 check_system_chunk(trans, extent_root, map->type); 2788 check_system_chunk(trans, extent_root, map->type);
2788 unlock_chunks(root->fs_info->chunk_root); 2789 unlock_chunks(root->fs_info->chunk_root);
2789 2790
2791 /*
2792 * Take the device list mutex to prevent races with the final phase of
2793 * a device replace operation that replaces the device object associated
2794 * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()).
2795 */
2796 mutex_lock(&fs_devices->device_list_mutex);
2790 for (i = 0; i < map->num_stripes; i++) { 2797 for (i = 0; i < map->num_stripes; i++) {
2791 struct btrfs_device *device = map->stripes[i].dev; 2798 struct btrfs_device *device = map->stripes[i].dev;
2792 ret = btrfs_free_dev_extent(trans, device, 2799 ret = btrfs_free_dev_extent(trans, device,
2793 map->stripes[i].physical, 2800 map->stripes[i].physical,
2794 &dev_extent_len); 2801 &dev_extent_len);
2795 if (ret) { 2802 if (ret) {
2803 mutex_unlock(&fs_devices->device_list_mutex);
2796 btrfs_abort_transaction(trans, root, ret); 2804 btrfs_abort_transaction(trans, root, ret);
2797 goto out; 2805 goto out;
2798 } 2806 }
@@ -2811,11 +2819,14 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
2811 if (map->stripes[i].dev) { 2819 if (map->stripes[i].dev) {
2812 ret = btrfs_update_device(trans, map->stripes[i].dev); 2820 ret = btrfs_update_device(trans, map->stripes[i].dev);
2813 if (ret) { 2821 if (ret) {
2822 mutex_unlock(&fs_devices->device_list_mutex);
2814 btrfs_abort_transaction(trans, root, ret); 2823 btrfs_abort_transaction(trans, root, ret);
2815 goto out; 2824 goto out;
2816 } 2825 }
2817 } 2826 }
2818 } 2827 }
2828 mutex_unlock(&fs_devices->device_list_mutex);
2829
2819 ret = btrfs_free_chunk(trans, root, chunk_objectid, chunk_offset); 2830 ret = btrfs_free_chunk(trans, root, chunk_objectid, chunk_offset);
2820 if (ret) { 2831 if (ret) {
2821 btrfs_abort_transaction(trans, root, ret); 2832 btrfs_abort_transaction(trans, root, ret);
@@ -4230,6 +4241,7 @@ int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
4230 if (IS_ERR(uuid_root)) { 4241 if (IS_ERR(uuid_root)) {
4231 ret = PTR_ERR(uuid_root); 4242 ret = PTR_ERR(uuid_root);
4232 btrfs_abort_transaction(trans, tree_root, ret); 4243 btrfs_abort_transaction(trans, tree_root, ret);
4244 btrfs_end_transaction(trans, tree_root);
4233 return ret; 4245 return ret;
4234 } 4246 }
4235 4247
@@ -4682,12 +4694,12 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4682 4694
4683 if (type & BTRFS_BLOCK_GROUP_RAID5) { 4695 if (type & BTRFS_BLOCK_GROUP_RAID5) {
4684 raid_stripe_len = find_raid56_stripe_len(ndevs - 1, 4696 raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
4685 btrfs_super_stripesize(info->super_copy)); 4697 extent_root->stripesize);
4686 data_stripes = num_stripes - 1; 4698 data_stripes = num_stripes - 1;
4687 } 4699 }
4688 if (type & BTRFS_BLOCK_GROUP_RAID6) { 4700 if (type & BTRFS_BLOCK_GROUP_RAID6) {
4689 raid_stripe_len = find_raid56_stripe_len(ndevs - 2, 4701 raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
4690 btrfs_super_stripesize(info->super_copy)); 4702 extent_root->stripesize);
4691 data_stripes = num_stripes - 2; 4703 data_stripes = num_stripes - 2;
4692 } 4704 }
4693 4705
@@ -5762,20 +5774,17 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
5762 } 5774 }
5763 } 5775 }
5764 if (found) { 5776 if (found) {
5765 if (physical_of_found + map->stripe_len <= 5777 struct btrfs_bio_stripe *tgtdev_stripe =
5766 dev_replace->cursor_left) { 5778 bbio->stripes + num_stripes;
5767 struct btrfs_bio_stripe *tgtdev_stripe =
5768 bbio->stripes + num_stripes;
5769 5779
5770 tgtdev_stripe->physical = physical_of_found; 5780 tgtdev_stripe->physical = physical_of_found;
5771 tgtdev_stripe->length = 5781 tgtdev_stripe->length =
5772 bbio->stripes[index_srcdev].length; 5782 bbio->stripes[index_srcdev].length;
5773 tgtdev_stripe->dev = dev_replace->tgtdev; 5783 tgtdev_stripe->dev = dev_replace->tgtdev;
5774 bbio->tgtdev_map[index_srcdev] = num_stripes; 5784 bbio->tgtdev_map[index_srcdev] = num_stripes;
5775 5785
5776 tgtdev_indexes++; 5786 tgtdev_indexes++;
5777 num_stripes++; 5787 num_stripes++;
5778 }
5779 } 5788 }
5780 } 5789 }
5781 5790
@@ -6250,27 +6259,23 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
6250 return dev; 6259 return dev;
6251} 6260}
6252 6261
6253static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key, 6262/* Return -EIO if any error, otherwise return 0. */
6254 struct extent_buffer *leaf, 6263static int btrfs_check_chunk_valid(struct btrfs_root *root,
6255 struct btrfs_chunk *chunk) 6264 struct extent_buffer *leaf,
6265 struct btrfs_chunk *chunk, u64 logical)
6256{ 6266{
6257 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
6258 struct map_lookup *map;
6259 struct extent_map *em;
6260 u64 logical;
6261 u64 length; 6267 u64 length;
6262 u64 stripe_len; 6268 u64 stripe_len;
6263 u64 devid; 6269 u16 num_stripes;
6264 u8 uuid[BTRFS_UUID_SIZE]; 6270 u16 sub_stripes;
6265 int num_stripes; 6271 u64 type;
6266 int ret;
6267 int i;
6268 6272
6269 logical = key->offset;
6270 length = btrfs_chunk_length(leaf, chunk); 6273 length = btrfs_chunk_length(leaf, chunk);
6271 stripe_len = btrfs_chunk_stripe_len(leaf, chunk); 6274 stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
6272 num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 6275 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
6273 /* Validation check */ 6276 sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
6277 type = btrfs_chunk_type(leaf, chunk);
6278
6274 if (!num_stripes) { 6279 if (!num_stripes) {
6275 btrfs_err(root->fs_info, "invalid chunk num_stripes: %u", 6280 btrfs_err(root->fs_info, "invalid chunk num_stripes: %u",
6276 num_stripes); 6281 num_stripes);
@@ -6281,6 +6286,11 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
6281 "invalid chunk logical %llu", logical); 6286 "invalid chunk logical %llu", logical);
6282 return -EIO; 6287 return -EIO;
6283 } 6288 }
6289 if (btrfs_chunk_sector_size(leaf, chunk) != root->sectorsize) {
6290 btrfs_err(root->fs_info, "invalid chunk sectorsize %u",
6291 btrfs_chunk_sector_size(leaf, chunk));
6292 return -EIO;
6293 }
6284 if (!length || !IS_ALIGNED(length, root->sectorsize)) { 6294 if (!length || !IS_ALIGNED(length, root->sectorsize)) {
6285 btrfs_err(root->fs_info, 6295 btrfs_err(root->fs_info,
6286 "invalid chunk length %llu", length); 6296 "invalid chunk length %llu", length);
@@ -6292,13 +6302,54 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
6292 return -EIO; 6302 return -EIO;
6293 } 6303 }
6294 if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) & 6304 if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
6295 btrfs_chunk_type(leaf, chunk)) { 6305 type) {
6296 btrfs_err(root->fs_info, "unrecognized chunk type: %llu", 6306 btrfs_err(root->fs_info, "unrecognized chunk type: %llu",
6297 ~(BTRFS_BLOCK_GROUP_TYPE_MASK | 6307 ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
6298 BTRFS_BLOCK_GROUP_PROFILE_MASK) & 6308 BTRFS_BLOCK_GROUP_PROFILE_MASK) &
6299 btrfs_chunk_type(leaf, chunk)); 6309 btrfs_chunk_type(leaf, chunk));
6300 return -EIO; 6310 return -EIO;
6301 } 6311 }
6312 if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
6313 (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) ||
6314 (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
6315 (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
6316 (type & BTRFS_BLOCK_GROUP_DUP && num_stripes > 2) ||
6317 ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
6318 num_stripes != 1)) {
6319 btrfs_err(root->fs_info,
6320 "invalid num_stripes:sub_stripes %u:%u for profile %llu",
6321 num_stripes, sub_stripes,
6322 type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
6323 return -EIO;
6324 }
6325
6326 return 0;
6327}
6328
6329static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
6330 struct extent_buffer *leaf,
6331 struct btrfs_chunk *chunk)
6332{
6333 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
6334 struct map_lookup *map;
6335 struct extent_map *em;
6336 u64 logical;
6337 u64 length;
6338 u64 stripe_len;
6339 u64 devid;
6340 u8 uuid[BTRFS_UUID_SIZE];
6341 int num_stripes;
6342 int ret;
6343 int i;
6344
6345 logical = key->offset;
6346 length = btrfs_chunk_length(leaf, chunk);
6347 stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
6348 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
6349
6350 ret = btrfs_check_chunk_valid(root, leaf, chunk, logical);
6351 if (ret)
6352 return ret;
6302 6353
6303 read_lock(&map_tree->map_tree.lock); 6354 read_lock(&map_tree->map_tree.lock);
6304 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1); 6355 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
@@ -6546,6 +6597,7 @@ int btrfs_read_sys_array(struct btrfs_root *root)
6546 u32 array_size; 6597 u32 array_size;
6547 u32 len = 0; 6598 u32 len = 0;
6548 u32 cur_offset; 6599 u32 cur_offset;
6600 u64 type;
6549 struct btrfs_key key; 6601 struct btrfs_key key;
6550 6602
6551 ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize); 6603 ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize);
@@ -6555,8 +6607,8 @@ int btrfs_read_sys_array(struct btrfs_root *root)
6555 * overallocate but we can keep it as-is, only the first page is used. 6607 * overallocate but we can keep it as-is, only the first page is used.
6556 */ 6608 */
6557 sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET); 6609 sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET);
6558 if (!sb) 6610 if (IS_ERR(sb))
6559 return -ENOMEM; 6611 return PTR_ERR(sb);
6560 set_extent_buffer_uptodate(sb); 6612 set_extent_buffer_uptodate(sb);
6561 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0); 6613 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
6562 /* 6614 /*
@@ -6612,6 +6664,15 @@ int btrfs_read_sys_array(struct btrfs_root *root)
6612 break; 6664 break;
6613 } 6665 }
6614 6666
6667 type = btrfs_chunk_type(sb, chunk);
6668 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
6669 btrfs_err(root->fs_info,
6670 "invalid chunk type %llu in sys_array at offset %u",
6671 type, cur_offset);
6672 ret = -EIO;
6673 break;
6674 }
6675
6615 len = btrfs_chunk_item_size(num_stripes); 6676 len = btrfs_chunk_item_size(num_stripes);
6616 if (cur_offset + len > array_size) 6677 if (cur_offset + len > array_size)
6617 goto out_short_read; 6678 goto out_short_read;
@@ -6630,12 +6691,14 @@ int btrfs_read_sys_array(struct btrfs_root *root)
6630 sb_array_offset += len; 6691 sb_array_offset += len;
6631 cur_offset += len; 6692 cur_offset += len;
6632 } 6693 }
6694 clear_extent_buffer_uptodate(sb);
6633 free_extent_buffer_stale(sb); 6695 free_extent_buffer_stale(sb);
6634 return ret; 6696 return ret;
6635 6697
6636out_short_read: 6698out_short_read:
6637 printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n", 6699 printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n",
6638 len, cur_offset); 6700 len, cur_offset);
6701 clear_extent_buffer_uptodate(sb);
6639 free_extent_buffer_stale(sb); 6702 free_extent_buffer_stale(sb);
6640 return -EIO; 6703 return -EIO;
6641} 6704}
@@ -6648,6 +6711,7 @@ int btrfs_read_chunk_tree(struct btrfs_root *root)
6648 struct btrfs_key found_key; 6711 struct btrfs_key found_key;
6649 int ret; 6712 int ret;
6650 int slot; 6713 int slot;
6714 u64 total_dev = 0;
6651 6715
6652 root = root->fs_info->chunk_root; 6716 root = root->fs_info->chunk_root;
6653 6717
@@ -6689,6 +6753,7 @@ int btrfs_read_chunk_tree(struct btrfs_root *root)
6689 ret = read_one_dev(root, leaf, dev_item); 6753 ret = read_one_dev(root, leaf, dev_item);
6690 if (ret) 6754 if (ret)
6691 goto error; 6755 goto error;
6756 total_dev++;
6692 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) { 6757 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
6693 struct btrfs_chunk *chunk; 6758 struct btrfs_chunk *chunk;
6694 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 6759 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
@@ -6698,6 +6763,28 @@ int btrfs_read_chunk_tree(struct btrfs_root *root)
6698 } 6763 }
6699 path->slots[0]++; 6764 path->slots[0]++;
6700 } 6765 }
6766
6767 /*
6768 * After loading chunk tree, we've got all device information,
6769 * do another round of validation checks.
6770 */
6771 if (total_dev != root->fs_info->fs_devices->total_devices) {
6772 btrfs_err(root->fs_info,
6773 "super_num_devices %llu mismatch with num_devices %llu found here",
6774 btrfs_super_num_devices(root->fs_info->super_copy),
6775 total_dev);
6776 ret = -EINVAL;
6777 goto error;
6778 }
6779 if (btrfs_super_total_bytes(root->fs_info->super_copy) <
6780 root->fs_info->fs_devices->total_rw_bytes) {
6781 btrfs_err(root->fs_info,
6782 "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
6783 btrfs_super_total_bytes(root->fs_info->super_copy),
6784 root->fs_info->fs_devices->total_rw_bytes);
6785 ret = -EINVAL;
6786 goto error;
6787 }
6701 ret = 0; 6788 ret = 0;
6702error: 6789error:
6703 unlock_chunks(root); 6790 unlock_chunks(root);
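
The new btrfs_check_chunk_valid() centralizes the chunk sanity checks, including the per-profile stripe-count rules added above. The same predicate, lifted into standalone C; the flag values are illustrative, not btrfs's real bit definitions.

    #include <stdio.h>

    #define GRP_RAID1   (1 << 0)
    #define GRP_RAID5   (1 << 1)
    #define GRP_RAID6   (1 << 2)
    #define GRP_RAID10  (1 << 3)
    #define GRP_DUP     (1 << 4)
    #define GRP_PROFILE_MASK \
            (GRP_RAID1 | GRP_RAID5 | GRP_RAID6 | GRP_RAID10 | GRP_DUP)

    static int chunk_stripes_valid(unsigned long long type,
                                   unsigned int num_stripes,
                                   unsigned int sub_stripes)
    {
            if (type & GRP_RAID10 && sub_stripes != 2)
                    return 0;
            if (type & GRP_RAID1 && num_stripes < 1)
                    return 0;
            if (type & GRP_RAID5 && num_stripes < 2)
                    return 0;
            if (type & GRP_RAID6 && num_stripes < 3)
                    return 0;
            if (type & GRP_DUP && num_stripes > 2)
                    return 0;
            /* single profile: exactly one stripe */
            if ((type & GRP_PROFILE_MASK) == 0 && num_stripes != 1)
                    return 0;
            return 1;
    }

    int main(void)
    {
            printf("raid6, 2 stripes: %s\n",
                   chunk_stripes_valid(GRP_RAID6, 2, 0) ? "ok" : "rejected");
            printf("raid10, sub_stripes 2: %s\n",
                   chunk_stripes_valid(GRP_RAID10, 4, 2) ? "ok" : "rejected");
            return 0;
    }
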
diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c
index 861d611b8c05..ce5f345d70f5 100644
--- a/fs/cachefiles/interface.c
+++ b/fs/cachefiles/interface.c
@@ -380,7 +380,7 @@ static void cachefiles_sync_cache(struct fscache_cache *_cache)
380 * check if the backing cache is updated to FS-Cache 380 * check if the backing cache is updated to FS-Cache
381 * - called by FS-Cache when it evaluates whether the cache needs invalidating 381
382 */ 382 */
383static bool cachefiles_check_consistency(struct fscache_operation *op) 383static int cachefiles_check_consistency(struct fscache_operation *op)
384{ 384{
385 struct cachefiles_object *object; 385 struct cachefiles_object *object;
386 struct cachefiles_cache *cache; 386 struct cachefiles_cache *cache;
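
The bool-to-int change makes cachefiles_check_consistency() match the declared type of the fscache check-consistency hook it is installed into. A sketch of why implementations must agree with the callback typedef exactly; the typedef here is an illustrative stand-in, not fscache's actual declaration.

    #include <stdio.h>

    struct operation;

    /* Illustrative stand-in for the fscache check-consistency hook type. */
    typedef int (*check_consistency_t)(struct operation *op);

    static int my_check_consistency(struct operation *op)
    {
            (void)op;
            return 0;       /* int contract: 0 if consistent, negative errno if not */
    }

    int main(void)
    {
            check_consistency_t hook = my_check_consistency; /* types must agree */

            printf("hook returned %d\n", hook(NULL));
            return 0;
    }
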
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index eeb71e5de27a..26a9d10d75e9 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -276,8 +276,10 @@ static void finish_read(struct ceph_osd_request *req)
276 for (i = 0; i < num_pages; i++) { 276 for (i = 0; i < num_pages; i++) {
277 struct page *page = osd_data->pages[i]; 277 struct page *page = osd_data->pages[i];
278 278
279 if (rc < 0 && rc != -ENOENT) 279 if (rc < 0 && rc != -ENOENT) {
280 ceph_fscache_readpage_cancel(inode, page);
280 goto unlock; 281 goto unlock;
282 }
281 if (bytes < (int)PAGE_SIZE) { 283 if (bytes < (int)PAGE_SIZE) {
282 /* zero (remainder of) page */ 284 /* zero (remainder of) page */
283 int s = bytes < 0 ? 0 : bytes; 285 int s = bytes < 0 ? 0 : bytes;
@@ -535,8 +537,6 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
535 CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb)) 537 CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
536 set_bdi_congested(&fsc->backing_dev_info, BLK_RW_ASYNC); 538 set_bdi_congested(&fsc->backing_dev_info, BLK_RW_ASYNC);
537 539
538 ceph_readpage_to_fscache(inode, page);
539
540 set_page_writeback(page); 540 set_page_writeback(page);
541 err = ceph_osdc_writepages(osdc, ceph_vino(inode), 541 err = ceph_osdc_writepages(osdc, ceph_vino(inode),
542 &ci->i_layout, snapc, 542 &ci->i_layout, snapc,
diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c
index c052b5bf219b..238c55b01723 100644
--- a/fs/ceph/cache.c
+++ b/fs/ceph/cache.c
@@ -25,6 +25,7 @@
25#include "cache.h" 25#include "cache.h"
26 26
27struct ceph_aux_inode { 27struct ceph_aux_inode {
28 u64 version;
28 struct timespec mtime; 29 struct timespec mtime;
29 loff_t size; 30 loff_t size;
30}; 31};
@@ -69,15 +70,8 @@ int ceph_fscache_register_fs(struct ceph_fs_client* fsc)
69 fsc->fscache = fscache_acquire_cookie(ceph_cache_netfs.primary_index, 70 fsc->fscache = fscache_acquire_cookie(ceph_cache_netfs.primary_index,
70 &ceph_fscache_fsid_object_def, 71 &ceph_fscache_fsid_object_def,
71 fsc, true); 72 fsc, true);
72 73 if (!fsc->fscache)
73 if (fsc->fscache == NULL) {
74 pr_err("Unable to resgister fsid: %p fscache cookie", fsc); 74 pr_err("Unable to resgister fsid: %p fscache cookie", fsc);
75 return 0;
76 }
77
78 fsc->revalidate_wq = alloc_workqueue("ceph-revalidate", 0, 1);
79 if (fsc->revalidate_wq == NULL)
80 return -ENOMEM;
81 75
82 return 0; 76 return 0;
83} 77}
@@ -105,6 +99,7 @@ static uint16_t ceph_fscache_inode_get_aux(const void *cookie_netfs_data,
105 const struct inode* inode = &ci->vfs_inode; 99 const struct inode* inode = &ci->vfs_inode;
106 100
107 memset(&aux, 0, sizeof(aux)); 101 memset(&aux, 0, sizeof(aux));
102 aux.version = ci->i_version;
108 aux.mtime = inode->i_mtime; 103 aux.mtime = inode->i_mtime;
109 aux.size = i_size_read(inode); 104 aux.size = i_size_read(inode);
110 105
@@ -131,6 +126,7 @@ static enum fscache_checkaux ceph_fscache_inode_check_aux(
131 return FSCACHE_CHECKAUX_OBSOLETE; 126 return FSCACHE_CHECKAUX_OBSOLETE;
132 127
133 memset(&aux, 0, sizeof(aux)); 128 memset(&aux, 0, sizeof(aux));
129 aux.version = ci->i_version;
134 aux.mtime = inode->i_mtime; 130 aux.mtime = inode->i_mtime;
135 aux.size = i_size_read(inode); 131 aux.size = i_size_read(inode);
136 132
@@ -181,32 +177,26 @@ static const struct fscache_cookie_def ceph_fscache_inode_object_def = {
181 .now_uncached = ceph_fscache_inode_now_uncached, 177 .now_uncached = ceph_fscache_inode_now_uncached,
182}; 178};
183 179
184void ceph_fscache_register_inode_cookie(struct ceph_fs_client* fsc, 180void ceph_fscache_register_inode_cookie(struct inode *inode)
185 struct ceph_inode_info* ci)
186{ 181{
187 struct inode* inode = &ci->vfs_inode; 182 struct ceph_inode_info *ci = ceph_inode(inode);
183 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
188 184
189 /* No caching for filesystem */ 185 /* No caching for filesystem */
190 if (fsc->fscache == NULL) 186 if (fsc->fscache == NULL)
191 return; 187 return;
192 188
193 /* Only cache for regular files that are read only */ 189 /* Only cache for regular files that are read only */
194 if ((ci->vfs_inode.i_mode & S_IFREG) == 0) 190 if (!S_ISREG(inode->i_mode))
195 return; 191 return;
196 192
197 /* Avoid multiple racing open requests */ 193 inode_lock_nested(inode, I_MUTEX_CHILD);
198 inode_lock(inode); 194 if (!ci->fscache) {
199 195 ci->fscache = fscache_acquire_cookie(fsc->fscache,
200 if (ci->fscache) 196 &ceph_fscache_inode_object_def,
201 goto done; 197 ci, false);
202 198 }
203 ci->fscache = fscache_acquire_cookie(fsc->fscache,
204 &ceph_fscache_inode_object_def,
205 ci, true);
206 fscache_check_consistency(ci->fscache);
207done:
208 inode_unlock(inode); 199 inode_unlock(inode);
209
210} 200}
211 201
212void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci) 202void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
@@ -222,6 +212,34 @@ void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
222 fscache_relinquish_cookie(cookie, 0); 212 fscache_relinquish_cookie(cookie, 0);
223} 213}
224 214
215static bool ceph_fscache_can_enable(void *data)
216{
217 struct inode *inode = data;
218 return !inode_is_open_for_write(inode);
219}
220
221void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp)
222{
223 struct ceph_inode_info *ci = ceph_inode(inode);
224
225 if (!fscache_cookie_valid(ci->fscache))
226 return;
227
228 if (inode_is_open_for_write(inode)) {
229 dout("fscache_file_set_cookie %p %p disabling cache\n",
230 inode, filp);
231 fscache_disable_cookie(ci->fscache, false);
232 fscache_uncache_all_inode_pages(ci->fscache, inode);
233 } else {
234 fscache_enable_cookie(ci->fscache, ceph_fscache_can_enable,
235 inode);
236 if (fscache_cookie_enabled(ci->fscache)) {
237 dout("fscache_file_set_cookie %p %p enabing cache\n",
238 inode, filp);
239 }
240 }
241}
242
225static void ceph_vfs_readpage_complete(struct page *page, void *data, int error) 243static void ceph_vfs_readpage_complete(struct page *page, void *data, int error)
226{ 244{
227 if (!error) 245 if (!error)
@@ -238,8 +256,7 @@ static void ceph_vfs_readpage_complete_unlock(struct page *page, void *data, int
238 256
239static inline bool cache_valid(struct ceph_inode_info *ci) 257static inline bool cache_valid(struct ceph_inode_info *ci)
240{ 258{
241 return ((ceph_caps_issued(ci) & CEPH_CAP_FILE_CACHE) && 259 return ci->i_fscache_gen == ci->i_rdcache_gen;
242 (ci->i_fscache_gen == ci->i_rdcache_gen));
243} 260}
244 261
245 262
@@ -332,69 +349,27 @@ void ceph_invalidate_fscache_page(struct inode* inode, struct page *page)
332 349
333void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc) 350void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc)
334{ 351{
335 if (fsc->revalidate_wq)
336 destroy_workqueue(fsc->revalidate_wq);
337
338 fscache_relinquish_cookie(fsc->fscache, 0); 352 fscache_relinquish_cookie(fsc->fscache, 0);
339 fsc->fscache = NULL; 353 fsc->fscache = NULL;
340} 354}
341 355
342static void ceph_revalidate_work(struct work_struct *work) 356/*
343{ 357 * caller should hold CEPH_CAP_FILE_{RD,CACHE}
344 int issued; 358 */
345 u32 orig_gen; 359void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci)
346 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
347 i_revalidate_work);
348 struct inode *inode = &ci->vfs_inode;
349
350 spin_lock(&ci->i_ceph_lock);
351 issued = __ceph_caps_issued(ci, NULL);
352 orig_gen = ci->i_rdcache_gen;
353 spin_unlock(&ci->i_ceph_lock);
354
355 if (!(issued & CEPH_CAP_FILE_CACHE)) {
356 dout("revalidate_work lost cache before validation %p\n",
357 inode);
358 goto out;
359 }
360
361 if (!fscache_check_consistency(ci->fscache))
362 fscache_invalidate(ci->fscache);
363
364 spin_lock(&ci->i_ceph_lock);
365 /* Update the new valid generation (backwards sanity check too) */
366 if (orig_gen > ci->i_fscache_gen) {
367 ci->i_fscache_gen = orig_gen;
368 }
369 spin_unlock(&ci->i_ceph_lock);
370
371out:
372 iput(&ci->vfs_inode);
373}
374
375void ceph_queue_revalidate(struct inode *inode)
376{ 360{
377 struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb); 361 if (cache_valid(ci))
378 struct ceph_inode_info *ci = ceph_inode(inode);
379
380 if (fsc->revalidate_wq == NULL || ci->fscache == NULL)
381 return; 362 return;
382 363
383 ihold(inode); 364 /* reuse i_truncate_mutex. There should be no pending
384 365 * truncate while the caller holds CEPH_CAP_FILE_RD */
385 if (queue_work(ceph_sb_to_client(inode->i_sb)->revalidate_wq, 366 mutex_lock(&ci->i_truncate_mutex);
386 &ci->i_revalidate_work)) { 367 if (!cache_valid(ci)) {
387 dout("ceph_queue_revalidate %p\n", inode); 368 if (fscache_check_consistency(ci->fscache))
388 } else { 369 fscache_invalidate(ci->fscache);
389 dout("ceph_queue_revalidate %p failed\n)", inode); 370 spin_lock(&ci->i_ceph_lock);
390 iput(inode); 371 ci->i_fscache_gen = ci->i_rdcache_gen;
372 spin_unlock(&ci->i_ceph_lock);
391 } 373 }
392} 374 mutex_unlock(&ci->i_truncate_mutex);
393
394void ceph_fscache_inode_init(struct ceph_inode_info *ci)
395{
396 ci->fscache = NULL;
397 /* The first load is verified cookie open time */
398 ci->i_fscache_gen = 1;
399 INIT_WORK(&ci->i_revalidate_work, ceph_revalidate_work);
400} 375}
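
ceph_fscache_revalidate_cookie() replaces the revalidate workqueue with a synchronous check under i_truncate_mutex, in the classic check/lock/recheck shape: the cheap generation test is repeated under the mutex so only one caller pays for the consistency check and late arrivals see the updated generation. A standalone sketch of that shape, with illustrative names (pthreads stands in for the kernel mutex).

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int fscache_gen;        /* illustrative generations */
    static unsigned int rdcache_gen = 3;

    static int cache_valid(void)
    {
            return fscache_gen == rdcache_gen;
    }

    static void revalidate_cookie(void)
    {
            if (cache_valid())                      /* cheap unlocked check */
                    return;

            pthread_mutex_lock(&lock);
            if (!cache_valid()) {                   /* recheck under the lock */
                    printf("checking consistency, invalidating if stale\n");
                    fscache_gen = rdcache_gen;      /* publish new generation */
            }
            pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
            revalidate_cookie();    /* does the work */
            revalidate_cookie();    /* takes the fast path */
            return 0;
    }
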
diff --git a/fs/ceph/cache.h b/fs/ceph/cache.h
index 5ac591bd012b..7e72c7594f0c 100644
--- a/fs/ceph/cache.h
+++ b/fs/ceph/cache.h
@@ -34,10 +34,10 @@ void ceph_fscache_unregister(void);
34int ceph_fscache_register_fs(struct ceph_fs_client* fsc); 34int ceph_fscache_register_fs(struct ceph_fs_client* fsc);
35void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc); 35void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc);
36 36
37void ceph_fscache_inode_init(struct ceph_inode_info *ci); 37void ceph_fscache_register_inode_cookie(struct inode *inode);
38void ceph_fscache_register_inode_cookie(struct ceph_fs_client* fsc,
39 struct ceph_inode_info* ci);
40void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci); 38void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci);
39void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp);
40void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci);
41 41
42int ceph_readpage_from_fscache(struct inode *inode, struct page *page); 42int ceph_readpage_from_fscache(struct inode *inode, struct page *page);
43int ceph_readpages_from_fscache(struct inode *inode, 43int ceph_readpages_from_fscache(struct inode *inode,
@@ -46,12 +46,11 @@ int ceph_readpages_from_fscache(struct inode *inode,
46 unsigned *nr_pages); 46 unsigned *nr_pages);
47void ceph_readpage_to_fscache(struct inode *inode, struct page *page); 47void ceph_readpage_to_fscache(struct inode *inode, struct page *page);
48void ceph_invalidate_fscache_page(struct inode* inode, struct page *page); 48void ceph_invalidate_fscache_page(struct inode* inode, struct page *page);
49void ceph_queue_revalidate(struct inode *inode);
50 49
51static inline void ceph_fscache_update_objectsize(struct inode *inode) 50static inline void ceph_fscache_inode_init(struct ceph_inode_info *ci)
52{ 51{
53 struct ceph_inode_info *ci = ceph_inode(inode); 52 ci->fscache = NULL;
54 fscache_attr_changed(ci->fscache); 53 ci->i_fscache_gen = 0;
55} 54}
56 55
57static inline void ceph_fscache_invalidate(struct inode *inode) 56static inline void ceph_fscache_invalidate(struct inode *inode)
@@ -88,6 +87,11 @@ static inline void ceph_fscache_readpages_cancel(struct inode *inode,
88 return fscache_readpages_cancel(ci->fscache, pages); 87 return fscache_readpages_cancel(ci->fscache, pages);
89} 88}
90 89
90static inline void ceph_disable_fscache_readpage(struct ceph_inode_info *ci)
91{
92 ci->i_fscache_gen = ci->i_rdcache_gen - 1;
93}
94
91#else 95#else
92 96
93static inline int ceph_fscache_register(void) 97static inline int ceph_fscache_register(void)
@@ -112,8 +116,20 @@ static inline void ceph_fscache_inode_init(struct ceph_inode_info *ci)
112{ 116{
113} 117}
114 118
115static inline void ceph_fscache_register_inode_cookie(struct ceph_fs_client* parent_fsc, 119static inline void ceph_fscache_register_inode_cookie(struct inode *inode)
116 struct ceph_inode_info* ci) 120{
121}
122
123static inline void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
124{
125}
126
127static inline void ceph_fscache_file_set_cookie(struct inode *inode,
128 struct file *filp)
129{
130}
131
132static inline void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci)
117{ 133{
118} 134}
119 135
@@ -141,10 +157,6 @@ static inline void ceph_readpage_to_fscache(struct inode *inode,
141{ 157{
142} 158}
143 159
144static inline void ceph_fscache_update_objectsize(struct inode *inode)
145{
146}
147
148static inline void ceph_fscache_invalidate(struct inode *inode) 160static inline void ceph_fscache_invalidate(struct inode *inode)
149{ 161{
150} 162}
@@ -154,10 +166,6 @@ static inline void ceph_invalidate_fscache_page(struct inode *inode,
154{ 166{
155} 167}
156 168
157static inline void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
158{
159}
160
161static inline int ceph_release_fscache_page(struct page *page, gfp_t gfp) 169static inline int ceph_release_fscache_page(struct page *page, gfp_t gfp)
162{ 170{
163 return 1; 171 return 1;
@@ -173,7 +181,7 @@ static inline void ceph_fscache_readpages_cancel(struct inode *inode,
173{ 181{
174} 182}
175 183
176static inline void ceph_queue_revalidate(struct inode *inode) 184static inline void ceph_disable_fscache_readpage(struct ceph_inode_info *ci)
177{ 185{
178} 186}
179 187
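
As before, cache.h pairs every CONFIG_CEPH_FSCACHE function with an empty static inline stub so call sites compile unchanged when the feature is off; the diff above just keeps the two halves in sync. The pattern in miniature, where CONFIG_MYFS_FSCACHE is a hypothetical config symbol:

    #include <stdio.h>

    struct inode;

    #ifdef CONFIG_MYFS_FSCACHE
    void register_inode_cookie(struct inode *inode);    /* real version, elsewhere */
    #else
    static inline void register_inode_cookie(struct inode *inode)
    {
            (void)inode;    /* feature compiled out: deliberately a no-op */
    }
    #endif

    int main(void)
    {
            register_inode_cookie(NULL);    /* compiles either way */
            printf("call sites need no #ifdef\n");
            return 0;
    }
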
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index c17b5d76d75e..6f60d0a3d0f9 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -2393,6 +2393,9 @@ again:
2393 snap_rwsem_locked = true; 2393 snap_rwsem_locked = true;
2394 } 2394 }
2395 *got = need | (have & want); 2395 *got = need | (have & want);
2396 if ((need & CEPH_CAP_FILE_RD) &&
2397 !(*got & CEPH_CAP_FILE_CACHE))
2398 ceph_disable_fscache_readpage(ci);
2396 __take_cap_refs(ci, *got, true); 2399 __take_cap_refs(ci, *got, true);
2397 ret = 1; 2400 ret = 1;
2398 } 2401 }
@@ -2554,6 +2557,9 @@ int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
2554 break; 2557 break;
2555 } 2558 }
2556 2559
2560 if ((_got & CEPH_CAP_FILE_RD) && (_got & CEPH_CAP_FILE_CACHE))
2561 ceph_fscache_revalidate_cookie(ci);
2562
2557 *got = _got; 2563 *got = _got;
2558 return 0; 2564 return 0;
2559} 2565}
@@ -2795,7 +2801,6 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc,
2795 bool writeback = false; 2801 bool writeback = false;
2796 bool queue_trunc = false; 2802 bool queue_trunc = false;
2797 bool queue_invalidate = false; 2803 bool queue_invalidate = false;
2798 bool queue_revalidate = false;
2799 bool deleted_inode = false; 2804 bool deleted_inode = false;
2800 bool fill_inline = false; 2805 bool fill_inline = false;
2801 2806
@@ -2837,8 +2842,6 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc,
2837 ci->i_rdcache_revoking = ci->i_rdcache_gen; 2842 ci->i_rdcache_revoking = ci->i_rdcache_gen;
2838 } 2843 }
2839 } 2844 }
2840
2841 ceph_fscache_invalidate(inode);
2842 } 2845 }
2843 2846
2844 /* side effects now are allowed */ 2847 /* side effects now are allowed */
@@ -2880,11 +2883,6 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc,
2880 } 2883 }
2881 } 2884 }
2882 2885
2883 /* Do we need to revalidate our fscache cookie. Don't bother on the
2884 * first cache cap as we already validate at cookie creation time. */
2885 if ((issued & CEPH_CAP_FILE_CACHE) && ci->i_rdcache_gen > 1)
2886 queue_revalidate = true;
2887
2888 if (newcaps & CEPH_CAP_ANY_RD) { 2886 if (newcaps & CEPH_CAP_ANY_RD) {
2889 /* ctime/mtime/atime? */ 2887 /* ctime/mtime/atime? */
2890 ceph_decode_timespec(&mtime, &grant->mtime); 2888 ceph_decode_timespec(&mtime, &grant->mtime);
@@ -2993,11 +2991,8 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc,
2993 if (fill_inline) 2991 if (fill_inline)
2994 ceph_fill_inline_data(inode, NULL, inline_data, inline_len); 2992 ceph_fill_inline_data(inode, NULL, inline_data, inline_len);
2995 2993
2996 if (queue_trunc) { 2994 if (queue_trunc)
2997 ceph_queue_vmtruncate(inode); 2995 ceph_queue_vmtruncate(inode);
2998 ceph_queue_revalidate(inode);
2999 } else if (queue_revalidate)
3000 ceph_queue_revalidate(inode);
3001 2996
3002 if (writeback) 2997 if (writeback)
3003 /* 2998 /*
@@ -3199,10 +3194,8 @@ static void handle_cap_trunc(struct inode *inode,
3199 truncate_seq, truncate_size, size); 3194 truncate_seq, truncate_size, size);
3200 spin_unlock(&ci->i_ceph_lock); 3195 spin_unlock(&ci->i_ceph_lock);
3201 3196
3202 if (queue_trunc) { 3197 if (queue_trunc)
3203 ceph_queue_vmtruncate(inode); 3198 ceph_queue_vmtruncate(inode);
3204 ceph_fscache_invalidate(inode);
3205 }
3206} 3199}
3207 3200
3208/* 3201/*
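
The caps.c hunks gate the local cache on the exact capability combination held: Fr granted without Fc disables fscache readpage, Fr together with Fc triggers cookie revalidation. The bitmask gating, reduced to a sketch with illustrative flag values:

    #include <stdio.h>

    #define CAP_FILE_RD     (1 << 0)    /* illustrative bit values */
    #define CAP_FILE_CACHE  (1 << 1)

    static void got_caps(unsigned int got)
    {
            if ((got & CAP_FILE_RD) && !(got & CAP_FILE_CACHE))
                    printf("Fr without Fc: bypass the local read cache\n");
            else if ((got & CAP_FILE_RD) && (got & CAP_FILE_CACHE))
                    printf("Fr and Fc: revalidate the fscache cookie\n");
    }

    int main(void)
    {
            got_caps(CAP_FILE_RD);
            got_caps(CAP_FILE_RD | CAP_FILE_CACHE);
            return 0;
    }
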
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index a888df6f2d71..ce2f5795e44b 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -137,23 +137,11 @@ static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
137{ 137{
138 struct ceph_file_info *cf; 138 struct ceph_file_info *cf;
139 int ret = 0; 139 int ret = 0;
140 struct ceph_inode_info *ci = ceph_inode(inode);
141 struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
142 struct ceph_mds_client *mdsc = fsc->mdsc;
143 140
144 switch (inode->i_mode & S_IFMT) { 141 switch (inode->i_mode & S_IFMT) {
145 case S_IFREG: 142 case S_IFREG:
146 /* First file open request creates the cookie, we want to keep 143 ceph_fscache_register_inode_cookie(inode);
147 * this cookie around for the filetime of the inode as not to 144 ceph_fscache_file_set_cookie(inode, file);
148 * have to worry about fscache register / revoke / operation
149 * races.
150 *
151 * Also, if we know the operation is going to invalidate data
152 * (non readonly) just nuke the cache right away.
153 */
154 ceph_fscache_register_inode_cookie(mdsc->fsc, ci);
155 if ((fmode & CEPH_FILE_MODE_WR))
156 ceph_fscache_invalidate(inode);
157 case S_IFDIR: 145 case S_IFDIR:
158 dout("init_file %p %p 0%o (regular)\n", inode, file, 146 dout("init_file %p %p 0%o (regular)\n", inode, file,
159 inode->i_mode); 147 inode->i_mode);
@@ -1349,7 +1337,7 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
1349 } 1337 }
1350 1338
1351retry_snap: 1339retry_snap:
1352 if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) { 1340 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL)) {
1353 err = -ENOSPC; 1341 err = -ENOSPC;
1354 goto out; 1342 goto out;
1355 } 1343 }
@@ -1407,7 +1395,6 @@ retry_snap:
1407 iov_iter_advance(from, written); 1395 iov_iter_advance(from, written);
1408 ceph_put_snap_context(snapc); 1396 ceph_put_snap_context(snapc);
1409 } else { 1397 } else {
1410 loff_t old_size = i_size_read(inode);
1411 /* 1398 /*
1412 * No need to acquire the i_truncate_mutex. Because 1399 * No need to acquire the i_truncate_mutex. Because
1413 * the MDS revokes Fwb caps before sending truncate 1400 * the MDS revokes Fwb caps before sending truncate
@@ -1418,8 +1405,6 @@ retry_snap:
1418 written = generic_perform_write(file, from, pos); 1405 written = generic_perform_write(file, from, pos);
1419 if (likely(written >= 0)) 1406 if (likely(written >= 0))
1420 iocb->ki_pos = pos + written; 1407 iocb->ki_pos = pos + written;
1421 if (i_size_read(inode) > old_size)
1422 ceph_fscache_update_objectsize(inode);
1423 inode_unlock(inode); 1408 inode_unlock(inode);
1424 } 1409 }
1425 1410
@@ -1440,7 +1425,7 @@ retry_snap:
1440 ceph_put_cap_refs(ci, got); 1425 ceph_put_cap_refs(ci, got);
1441 1426
1442 if (written >= 0) { 1427 if (written >= 0) {
1443 if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL)) 1428 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_NEARFULL))
1444 iocb->ki_flags |= IOCB_DSYNC; 1429 iocb->ki_flags |= IOCB_DSYNC;
1445 1430
1446 written = generic_write_sync(iocb, written); 1431 written = generic_write_sync(iocb, written);
@@ -1672,8 +1657,8 @@ static long ceph_fallocate(struct file *file, int mode,
1672 goto unlock; 1657 goto unlock;
1673 } 1658 }
1674 1659
1675 if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) && 1660 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) &&
1676 !(mode & FALLOC_FL_PUNCH_HOLE)) { 1661 !(mode & FALLOC_FL_PUNCH_HOLE)) {
1677 ret = -ENOSPC; 1662 ret = -ENOSPC;
1678 goto unlock; 1663 goto unlock;
1679 } 1664 }
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 0130a8592191..0168b49fb6ad 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -103,7 +103,6 @@ struct ceph_fs_client {
103 103
104#ifdef CONFIG_CEPH_FSCACHE 104#ifdef CONFIG_CEPH_FSCACHE
105 struct fscache_cookie *fscache; 105 struct fscache_cookie *fscache;
106 struct workqueue_struct *revalidate_wq;
107#endif 106#endif
108}; 107};
109 108
@@ -360,8 +359,7 @@ struct ceph_inode_info {
360 359
361#ifdef CONFIG_CEPH_FSCACHE 360#ifdef CONFIG_CEPH_FSCACHE
362 struct fscache_cookie *fscache; 361 struct fscache_cookie *fscache;
363 u32 i_fscache_gen; /* sequence, for delayed fscache validate */ 362 u32 i_fscache_gen;
364 struct work_struct i_revalidate_work;
365#endif 363#endif
366 struct inode vfs_inode; /* at end */ 364 struct inode vfs_inode; /* at end */
367}; 365};
diff --git a/fs/coredump.c b/fs/coredump.c
index 38a7ab87e10a..281b768000e6 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -794,6 +794,7 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
794 return 0; 794 return 0;
795 file->f_pos = pos; 795 file->f_pos = pos;
796 cprm->written += n; 796 cprm->written += n;
797 cprm->pos += n;
797 nr -= n; 798 nr -= n;
798 } 799 }
799 return 1; 800 return 1;
@@ -808,6 +809,7 @@ int dump_skip(struct coredump_params *cprm, size_t nr)
808 if (dump_interrupted() || 809 if (dump_interrupted() ||
809 file->f_op->llseek(file, nr, SEEK_CUR) < 0) 810 file->f_op->llseek(file, nr, SEEK_CUR) < 0)
810 return 0; 811 return 0;
812 cprm->pos += nr;
811 return 1; 813 return 1;
812 } else { 814 } else {
813 while (nr > PAGE_SIZE) { 815 while (nr > PAGE_SIZE) {
@@ -822,7 +824,7 @@ EXPORT_SYMBOL(dump_skip);
822 824
823int dump_align(struct coredump_params *cprm, int align) 825int dump_align(struct coredump_params *cprm, int align)
824{ 826{
825 unsigned mod = cprm->file->f_pos & (align - 1); 827 unsigned mod = cprm->pos & (align - 1);
826 if (align & (align - 1)) 828 if (align & (align - 1))
827 return 0; 829 return 0;
828 return mod ? dump_skip(cprm, align - mod) : 1; 830 return mod ? dump_skip(cprm, align - mod) : 1;
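
With cprm->pos maintained by both dump_emit() and dump_skip(), dump_align() can compute padding from the dump cursor instead of file->f_pos, which is meaningless when the core is written to a pipe. The underlying arithmetic as a sketch: align must be a power of two, checked with align & (align - 1), and the pad is the distance to the next aligned offset.

    #include <stdio.h>

    static unsigned long pad_to_align(unsigned long pos, unsigned long align)
    {
            unsigned long mod;

            if (align & (align - 1))        /* not a power of two: caller bug */
                    return (unsigned long)-1;
            mod = pos & (align - 1);
            return mod ? align - mod : 0;
    }

    int main(void)
    {
            printf("pos=13 align=8 -> pad %lu\n", pad_to_align(13, 8)); /* 3 */
            printf("pos=16 align=8 -> pad %lu\n", pad_to_align(16, 8)); /* 0 */
            return 0;
    }
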
diff --git a/fs/dcache.c b/fs/dcache.c
index ad4a542e9bab..d6847d7b123d 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -507,6 +507,44 @@ void d_drop(struct dentry *dentry)
507} 507}
508EXPORT_SYMBOL(d_drop); 508EXPORT_SYMBOL(d_drop);
509 509
510static inline void dentry_unlist(struct dentry *dentry, struct dentry *parent)
511{
512 struct dentry *next;
513 /*
514 * Inform d_walk() and shrink_dentry_list() that we are no longer
515 * attached to the dentry tree
516 */
517 dentry->d_flags |= DCACHE_DENTRY_KILLED;
518 if (unlikely(list_empty(&dentry->d_child)))
519 return;
520 __list_del_entry(&dentry->d_child);
521 /*
522 * Cursors can move around the list of children. While we'd been
523 * a normal list member, it didn't matter - ->d_child.next would've
524 * been updated. However, from now on it won't be and for the
525 * things like d_walk() it might end up with a nasty surprise.
526 * Normally d_walk() doesn't care about cursors moving around -
527 * ->d_lock on parent prevents that and since a cursor has no children
528 * of its own, we get through it without ever unlocking the parent.
529 * There is one exception, though - if we ascend from a child that
530 * gets killed as soon as we unlock it, the next sibling is found
531 * using the value left in its ->d_child.next. And if _that_
532 * pointed to a cursor, and cursor got moved (e.g. by lseek())
533 * before d_walk() regains parent->d_lock, we'll end up skipping
534 * everything the cursor had been moved past.
535 *
536 * Solution: make sure that the pointer left behind in ->d_child.next
537 * points to something that won't be moving around. I.e. skip the
538 * cursors.
539 */
540 while (dentry->d_child.next != &parent->d_subdirs) {
541 next = list_entry(dentry->d_child.next, struct dentry, d_child);
542 if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR)))
543 break;
544 dentry->d_child.next = next->d_child.next;
545 }
546}
547
510static void __dentry_kill(struct dentry *dentry) 548static void __dentry_kill(struct dentry *dentry)
511{ 549{
512 struct dentry *parent = NULL; 550 struct dentry *parent = NULL;
@@ -532,12 +570,7 @@ static void __dentry_kill(struct dentry *dentry)
532 } 570 }
533 /* if it was on the hash then remove it */ 571 /* if it was on the hash then remove it */
534 __d_drop(dentry); 572 __d_drop(dentry);
535 __list_del_entry(&dentry->d_child); 573 dentry_unlist(dentry, parent);
536 /*
537 * Inform d_walk() that we are no longer attached to the
538 * dentry tree
539 */
540 dentry->d_flags |= DCACHE_DENTRY_KILLED;
541 if (parent) 574 if (parent)
542 spin_unlock(&parent->d_lock); 575 spin_unlock(&parent->d_lock);
543 dentry_iput(dentry); 576 dentry_iput(dentry);
@@ -1203,6 +1236,9 @@ resume:
1203 struct dentry *dentry = list_entry(tmp, struct dentry, d_child); 1236 struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
1204 next = tmp->next; 1237 next = tmp->next;
1205 1238
1239 if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
1240 continue;
1241
1206 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); 1242 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1207 1243
1208 ret = enter(data, dentry); 1244 ret = enter(data, dentry);
@@ -1636,7 +1672,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
1636 struct dentry *dentry = __d_alloc(parent->d_sb, name); 1672 struct dentry *dentry = __d_alloc(parent->d_sb, name);
1637 if (!dentry) 1673 if (!dentry)
1638 return NULL; 1674 return NULL;
1639 1675 dentry->d_flags |= DCACHE_RCUACCESS;
1640 spin_lock(&parent->d_lock); 1676 spin_lock(&parent->d_lock);
1641 /* 1677 /*
1642 * don't need child lock because it is not subject 1678 * don't need child lock because it is not subject
@@ -1651,6 +1687,16 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
1651} 1687}
1652EXPORT_SYMBOL(d_alloc); 1688EXPORT_SYMBOL(d_alloc);
1653 1689
1690struct dentry *d_alloc_cursor(struct dentry * parent)
1691{
1692 struct dentry *dentry = __d_alloc(parent->d_sb, NULL);
1693 if (dentry) {
1694 dentry->d_flags |= DCACHE_RCUACCESS | DCACHE_DENTRY_CURSOR;
1695 dentry->d_parent = dget(parent);
1696 }
1697 return dentry;
1698}
1699
1654/** 1700/**
1655 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems) 1701 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
1656 * @sb: the superblock 1702 * @sb: the superblock
@@ -2358,7 +2404,6 @@ static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
2358{ 2404{
2359 BUG_ON(!d_unhashed(entry)); 2405 BUG_ON(!d_unhashed(entry));
2360 hlist_bl_lock(b); 2406 hlist_bl_lock(b);
2361 entry->d_flags |= DCACHE_RCUACCESS;
2362 hlist_bl_add_head_rcu(&entry->d_hash, b); 2407 hlist_bl_add_head_rcu(&entry->d_hash, b);
2363 hlist_bl_unlock(b); 2408 hlist_bl_unlock(b);
2364} 2409}
@@ -2458,7 +2503,6 @@ retry:
2458 rcu_read_unlock(); 2503 rcu_read_unlock();
2459 goto retry; 2504 goto retry;
2460 } 2505 }
2461 rcu_read_unlock();
2462 /* 2506 /*
2463 * No changes for the parent since the beginning of d_lookup(). 2507 * No changes for the parent since the beginning of d_lookup().
2464 * Since all removals from the chain happen with hlist_bl_lock(), 2508 * Since all removals from the chain happen with hlist_bl_lock(),
@@ -2471,8 +2515,6 @@ retry:
2471 continue; 2515 continue;
2472 if (dentry->d_parent != parent) 2516 if (dentry->d_parent != parent)
2473 continue; 2517 continue;
2474 if (d_unhashed(dentry))
2475 continue;
2476 if (parent->d_flags & DCACHE_OP_COMPARE) { 2518 if (parent->d_flags & DCACHE_OP_COMPARE) {
2477 int tlen = dentry->d_name.len; 2519 int tlen = dentry->d_name.len;
2478 const char *tname = dentry->d_name.name; 2520 const char *tname = dentry->d_name.name;
@@ -2484,9 +2526,18 @@ retry:
2484 if (dentry_cmp(dentry, str, len)) 2526 if (dentry_cmp(dentry, str, len))
2485 continue; 2527 continue;
2486 } 2528 }
2487 dget(dentry);
2488 hlist_bl_unlock(b); 2529 hlist_bl_unlock(b);
2489 /* somebody is doing lookup for it right now; wait for it */ 2530 /* now we can try to grab a reference */
2531 if (!lockref_get_not_dead(&dentry->d_lockref)) {
2532 rcu_read_unlock();
2533 goto retry;
2534 }
2535
2536 rcu_read_unlock();
2537 /*
2538 * somebody is likely to be still doing lookup for it;
2539 * wait for them to finish
2540 */
2490 spin_lock(&dentry->d_lock); 2541 spin_lock(&dentry->d_lock);
2491 d_wait_lookup(dentry); 2542 d_wait_lookup(dentry);
2492 /* 2543 /*
@@ -2517,6 +2568,7 @@ retry:
2517 dput(new); 2568 dput(new);
2518 return dentry; 2569 return dentry;
2519 } 2570 }
2571 rcu_read_unlock();
2520 /* we can't take ->d_lock here; it's OK, though. */ 2572 /* we can't take ->d_lock here; it's OK, though. */
2521 new->d_flags |= DCACHE_PAR_LOOKUP; 2573 new->d_flags |= DCACHE_PAR_LOOKUP;
2522 new->d_wait = wq; 2574 new->d_wait = wq;
@@ -2843,6 +2895,7 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
2843 /* ... and switch them in the tree */ 2895 /* ... and switch them in the tree */
2844 if (IS_ROOT(dentry)) { 2896 if (IS_ROOT(dentry)) {
2845 /* splicing a tree */ 2897 /* splicing a tree */
2898 dentry->d_flags |= DCACHE_RCUACCESS;
2846 dentry->d_parent = target->d_parent; 2899 dentry->d_parent = target->d_parent;
2847 target->d_parent = target; 2900 target->d_parent = target;
2848 list_del_init(&target->d_child); 2901 list_del_init(&target->d_child);
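
The dentry_unlist() hunk above is an instance of a general pattern: when a node is removed from a list but other walkers may later resume from the stale ->next pointer it leaves behind, that pointer has to be steered past entries that can still move (the cursors). A minimal userspace model of the idea, with invented names and a plain circular list standing in for d_subdirs:

#include <stdbool.h>
#include <stdio.h>

struct node {
	struct node *prev, *next;
	bool is_cursor;
	char name;
};

/* Unlink n, then advance the stale ->next it leaves behind past any
 * cursor entries, mirroring the loop in dentry_unlist(). */
static void unlist(struct node *n, struct node *head)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	while (n->next != head && n->next->is_cursor)
		n->next = n->next->next;
}

int main(void)
{
	struct node head = { &head, &head, false, '*' };
	struct node a = { 0 }, cur = { 0 }, b = { 0 };
	struct node *order[] = { &a, &cur, &b };
	int i;

	a.name = 'a'; cur.name = 'c'; b.name = 'b';
	cur.is_cursor = true;
	for (i = 0; i < 3; i++) {	/* build head->a->cur->b */
		order[i]->prev = head.prev;
		order[i]->next = &head;
		head.prev->next = order[i];
		head.prev = order[i];
	}
	unlist(&a, &head);
	/* a's stale ->next now points at b, never at the movable cursor */
	printf("a resumes at '%c'\n", a.next->name);
	return 0;
}
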
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 9c1c9a01b7e5..592059f88e04 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -127,7 +127,6 @@ static int open_proxy_open(struct inode *inode, struct file *filp)
127 r = real_fops->open(inode, filp); 127 r = real_fops->open(inode, filp);
128 128
129out: 129out:
130 fops_put(real_fops);
131 debugfs_use_file_finish(srcu_idx); 130 debugfs_use_file_finish(srcu_idx);
132 return r; 131 return r;
133} 132}
@@ -262,8 +261,10 @@ static int full_proxy_open(struct inode *inode, struct file *filp)
262 261
263 if (real_fops->open) { 262 if (real_fops->open) {
264 r = real_fops->open(inode, filp); 263 r = real_fops->open(inode, filp);
265 264 if (r) {
266 if (filp->f_op != proxy_fops) { 265 replace_fops(filp, d_inode(dentry)->i_fop);
266 goto free_proxy;
267 } else if (filp->f_op != proxy_fops) {
267 /* No protection against file removal anymore. */ 268 /* No protection against file removal anymore. */
268 WARN(1, "debugfs file owner replaced proxy fops: %pd", 269 WARN(1, "debugfs file owner replaced proxy fops: %pd",
269 dentry); 270 dentry);
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 0b2954d7172d..37c134a132c7 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -95,8 +95,6 @@ static struct ctl_table pty_root_table[] = {
95 95
96static DEFINE_MUTEX(allocated_ptys_lock); 96static DEFINE_MUTEX(allocated_ptys_lock);
97 97
98static struct vfsmount *devpts_mnt;
99
100struct pts_mount_opts { 98struct pts_mount_opts {
101 int setuid; 99 int setuid;
102 int setgid; 100 int setgid;
@@ -104,7 +102,7 @@ struct pts_mount_opts {
104 kgid_t gid; 102 kgid_t gid;
105 umode_t mode; 103 umode_t mode;
106 umode_t ptmxmode; 104 umode_t ptmxmode;
107 int newinstance; 105 int reserve;
108 int max; 106 int max;
109}; 107};
110 108
@@ -117,11 +115,9 @@ static const match_table_t tokens = {
117 {Opt_uid, "uid=%u"}, 115 {Opt_uid, "uid=%u"},
118 {Opt_gid, "gid=%u"}, 116 {Opt_gid, "gid=%u"},
119 {Opt_mode, "mode=%o"}, 117 {Opt_mode, "mode=%o"},
120#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
121 {Opt_ptmxmode, "ptmxmode=%o"}, 118 {Opt_ptmxmode, "ptmxmode=%o"},
122 {Opt_newinstance, "newinstance"}, 119 {Opt_newinstance, "newinstance"},
123 {Opt_max, "max=%d"}, 120 {Opt_max, "max=%d"},
124#endif
125 {Opt_err, NULL} 121 {Opt_err, NULL}
126}; 122};
127 123
@@ -137,15 +133,48 @@ static inline struct pts_fs_info *DEVPTS_SB(struct super_block *sb)
137 return sb->s_fs_info; 133 return sb->s_fs_info;
138} 134}
139 135
140static inline struct super_block *pts_sb_from_inode(struct inode *inode) 136struct pts_fs_info *devpts_acquire(struct file *filp)
141{ 137{
142#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES 138 struct pts_fs_info *result;
143 if (inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC) 139 struct path path;
144 return inode->i_sb; 140 struct super_block *sb;
145#endif 141 int err;
146 if (!devpts_mnt) 142
147 return NULL; 143 path = filp->f_path;
148 return devpts_mnt->mnt_sb; 144 path_get(&path);
145
146 /* Has the devpts filesystem already been found? */
147 sb = path.mnt->mnt_sb;
148 if (sb->s_magic != DEVPTS_SUPER_MAGIC) {
149 /* Is a devpts filesystem at "pts" in the same directory? */
150 err = path_pts(&path);
151 if (err) {
152 result = ERR_PTR(err);
153 goto out;
154 }
155
156 /* Is the path the root of a devpts filesystem? */
157 result = ERR_PTR(-ENODEV);
158 sb = path.mnt->mnt_sb;
159 if ((sb->s_magic != DEVPTS_SUPER_MAGIC) ||
160 (path.mnt->mnt_root != sb->s_root))
161 goto out;
162 }
163
164 /*
165 * pty code needs to hold extra references in case of last /dev/tty close
166 */
167 atomic_inc(&sb->s_active);
168 result = DEVPTS_SB(sb);
169
170out:
171 path_put(&path);
172 return result;
173}
174
175void devpts_release(struct pts_fs_info *fsi)
176{
177 deactivate_super(fsi->sb);
149} 178}
150 179
151#define PARSE_MOUNT 0 180#define PARSE_MOUNT 0
@@ -154,9 +183,7 @@ static inline struct super_block *pts_sb_from_inode(struct inode *inode)
154/* 183/*
155 * parse_mount_options(): 184 * parse_mount_options():
156 * Set @opts to mount options specified in @data. If an option is not 185 * Set @opts to mount options specified in @data. If an option is not
157 * specified in @data, set it to its default value. The exception is 186 * specified in @data, set it to its default value.
158 * 'newinstance' option which can only be set/cleared on a mount (i.e.
159 * cannot be changed during remount).
160 * 187 *
161 * Note: @data may be NULL (in which case all options are set to default). 188 * Note: @data may be NULL (in which case all options are set to default).
162 */ 189 */
@@ -174,9 +201,12 @@ static int parse_mount_options(char *data, int op, struct pts_mount_opts *opts)
174 opts->ptmxmode = DEVPTS_DEFAULT_PTMX_MODE; 201 opts->ptmxmode = DEVPTS_DEFAULT_PTMX_MODE;
175 opts->max = NR_UNIX98_PTY_MAX; 202 opts->max = NR_UNIX98_PTY_MAX;
176 203
177 /* newinstance makes sense only on initial mount */ 204 /* Only allow instances mounted from the initial mount
205 * namespace to tap the reserve pool of ptys.
206 */
178 if (op == PARSE_MOUNT) 207 if (op == PARSE_MOUNT)
179 opts->newinstance = 0; 208 opts->reserve =
209 (current->nsproxy->mnt_ns == init_task.nsproxy->mnt_ns);
180 210
181 while ((p = strsep(&data, ",")) != NULL) { 211 while ((p = strsep(&data, ",")) != NULL) {
182 substring_t args[MAX_OPT_ARGS]; 212 substring_t args[MAX_OPT_ARGS];
@@ -211,16 +241,12 @@ static int parse_mount_options(char *data, int op, struct pts_mount_opts *opts)
211 return -EINVAL; 241 return -EINVAL;
212 opts->mode = option & S_IALLUGO; 242 opts->mode = option & S_IALLUGO;
213 break; 243 break;
214#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
215 case Opt_ptmxmode: 244 case Opt_ptmxmode:
216 if (match_octal(&args[0], &option)) 245 if (match_octal(&args[0], &option))
217 return -EINVAL; 246 return -EINVAL;
218 opts->ptmxmode = option & S_IALLUGO; 247 opts->ptmxmode = option & S_IALLUGO;
219 break; 248 break;
220 case Opt_newinstance: 249 case Opt_newinstance:
221 /* newinstance makes sense only on initial mount */
222 if (op == PARSE_MOUNT)
223 opts->newinstance = 1;
224 break; 250 break;
225 case Opt_max: 251 case Opt_max:
226 if (match_int(&args[0], &option) || 252 if (match_int(&args[0], &option) ||
@@ -228,7 +254,6 @@ static int parse_mount_options(char *data, int op, struct pts_mount_opts *opts)
228 return -EINVAL; 254 return -EINVAL;
229 opts->max = option; 255 opts->max = option;
230 break; 256 break;
231#endif
232 default: 257 default:
233 pr_err("called with bogus options\n"); 258 pr_err("called with bogus options\n");
234 return -EINVAL; 259 return -EINVAL;
@@ -238,7 +263,6 @@ static int parse_mount_options(char *data, int op, struct pts_mount_opts *opts)
238 return 0; 263 return 0;
239} 264}
240 265
241#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
242static int mknod_ptmx(struct super_block *sb) 266static int mknod_ptmx(struct super_block *sb)
243{ 267{
244 int mode; 268 int mode;
@@ -305,12 +329,6 @@ static void update_ptmx_mode(struct pts_fs_info *fsi)
305 inode->i_mode = S_IFCHR|fsi->mount_opts.ptmxmode; 329 inode->i_mode = S_IFCHR|fsi->mount_opts.ptmxmode;
306 } 330 }
307} 331}
308#else
309static inline void update_ptmx_mode(struct pts_fs_info *fsi)
310{
311 return;
312}
313#endif
314 332
315static int devpts_remount(struct super_block *sb, int *flags, char *data) 333static int devpts_remount(struct super_block *sb, int *flags, char *data)
316{ 334{
@@ -344,11 +362,9 @@ static int devpts_show_options(struct seq_file *seq, struct dentry *root)
344 seq_printf(seq, ",gid=%u", 362 seq_printf(seq, ",gid=%u",
345 from_kgid_munged(&init_user_ns, opts->gid)); 363 from_kgid_munged(&init_user_ns, opts->gid));
346 seq_printf(seq, ",mode=%03o", opts->mode); 364 seq_printf(seq, ",mode=%03o", opts->mode);
347#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
348 seq_printf(seq, ",ptmxmode=%03o", opts->ptmxmode); 365 seq_printf(seq, ",ptmxmode=%03o", opts->ptmxmode);
349 if (opts->max < NR_UNIX98_PTY_MAX) 366 if (opts->max < NR_UNIX98_PTY_MAX)
350 seq_printf(seq, ",max=%d", opts->max); 367 seq_printf(seq, ",max=%d", opts->max);
351#endif
352 368
353 return 0; 369 return 0;
354} 370}
@@ -410,40 +426,11 @@ fail:
410 return -ENOMEM; 426 return -ENOMEM;
411} 427}
412 428
413#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
414static int compare_init_pts_sb(struct super_block *s, void *p)
415{
416 if (devpts_mnt)
417 return devpts_mnt->mnt_sb == s;
418 return 0;
419}
420
421/* 429/*
422 * devpts_mount() 430 * devpts_mount()
423 * 431 *
424 * If the '-o newinstance' mount option was specified, mount a new 432 * Mount a new (private) instance of devpts. PTYs created in this
425 * (private) instance of devpts. PTYs created in this instance are 433 * instance are independent of the PTYs in other devpts instances.
426 * independent of the PTYs in other devpts instances.
427 *
428 * If the '-o newinstance' option was not specified, mount/remount the
429 * initial kernel mount of devpts. This type of mount gives the
430 * legacy, single-instance semantics.
431 *
432 * The 'newinstance' option is needed to support multiple namespace
433 * semantics in devpts while preserving backward compatibility of the
434 * current 'single-namespace' semantics. i.e all mounts of devpts
435 * without the 'newinstance' mount option should bind to the initial
436 * kernel mount, like mount_single().
437 *
438 * Mounts with 'newinstance' option create a new, private namespace.
439 *
440 * NOTE:
441 *
442 * For single-mount semantics, devpts cannot use mount_single(),
443 * because mount_single()/sget() find and use the super-block from
444 * the most recent mount of devpts. But that recent mount may be a
445 * 'newinstance' mount and mount_single() would pick the newinstance
446 * super-block instead of the initial super-block.
447 */ 434 */
448static struct dentry *devpts_mount(struct file_system_type *fs_type, 435static struct dentry *devpts_mount(struct file_system_type *fs_type,
449 int flags, const char *dev_name, void *data) 436 int flags, const char *dev_name, void *data)
@@ -456,18 +443,7 @@ static struct dentry *devpts_mount(struct file_system_type *fs_type,
456 if (error) 443 if (error)
457 return ERR_PTR(error); 444 return ERR_PTR(error);
458 445
459 /* Require newinstance for all user namespace mounts to ensure 446 s = sget(fs_type, NULL, set_anon_super, flags, NULL);
460 * the mount options are not changed.
461 */
462 if ((current_user_ns() != &init_user_ns) && !opts.newinstance)
463 return ERR_PTR(-EINVAL);
464
465 if (opts.newinstance)
466 s = sget(fs_type, NULL, set_anon_super, flags, NULL);
467 else
468 s = sget(fs_type, compare_init_pts_sb, set_anon_super, flags,
469 NULL);
470
471 if (IS_ERR(s)) 447 if (IS_ERR(s))
472 return ERR_CAST(s); 448 return ERR_CAST(s);
473 449
@@ -491,18 +467,6 @@ out_undo_sget:
491 return ERR_PTR(error); 467 return ERR_PTR(error);
492} 468}
493 469
494#else
495/*
496 * This supports only the legacy single-instance semantics (no
497 * multiple-instance semantics)
498 */
499static struct dentry *devpts_mount(struct file_system_type *fs_type, int flags,
500 const char *dev_name, void *data)
501{
502 return mount_single(fs_type, flags, data, devpts_fill_super);
503}
504#endif
505
506static void devpts_kill_sb(struct super_block *sb) 470static void devpts_kill_sb(struct super_block *sb)
507{ 471{
508 struct pts_fs_info *fsi = DEVPTS_SB(sb); 472 struct pts_fs_info *fsi = DEVPTS_SB(sb);
@@ -516,9 +480,7 @@ static struct file_system_type devpts_fs_type = {
516 .name = "devpts", 480 .name = "devpts",
517 .mount = devpts_mount, 481 .mount = devpts_mount,
518 .kill_sb = devpts_kill_sb, 482 .kill_sb = devpts_kill_sb,
519#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
520 .fs_flags = FS_USERNS_MOUNT | FS_USERNS_DEV_MOUNT, 483 .fs_flags = FS_USERNS_MOUNT | FS_USERNS_DEV_MOUNT,
521#endif
522}; 484};
523 485
524/* 486/*
@@ -531,16 +493,13 @@ int devpts_new_index(struct pts_fs_info *fsi)
531 int index; 493 int index;
532 int ida_ret; 494 int ida_ret;
533 495
534 if (!fsi)
535 return -ENODEV;
536
537retry: 496retry:
538 if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL)) 497 if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL))
539 return -ENOMEM; 498 return -ENOMEM;
540 499
541 mutex_lock(&allocated_ptys_lock); 500 mutex_lock(&allocated_ptys_lock);
542 if (pty_count >= pty_limit - 501 if (pty_count >= (pty_limit -
543 (fsi->mount_opts.newinstance ? pty_reserve : 0)) { 502 (fsi->mount_opts.reserve ? 0 : pty_reserve))) {
544 mutex_unlock(&allocated_ptys_lock); 503 mutex_unlock(&allocated_ptys_lock);
545 return -ENOSPC; 504 return -ENOSPC;
546 } 505 }
@@ -571,30 +530,6 @@ void devpts_kill_index(struct pts_fs_info *fsi, int idx)
571 mutex_unlock(&allocated_ptys_lock); 530 mutex_unlock(&allocated_ptys_lock);
572} 531}
573 532
574/*
575 * pty code needs to hold extra references in case of last /dev/tty close
576 */
577struct pts_fs_info *devpts_get_ref(struct inode *ptmx_inode, struct file *file)
578{
579 struct super_block *sb;
580 struct pts_fs_info *fsi;
581
582 sb = pts_sb_from_inode(ptmx_inode);
583 if (!sb)
584 return NULL;
585 fsi = DEVPTS_SB(sb);
586 if (!fsi)
587 return NULL;
588
589 atomic_inc(&sb->s_active);
590 return fsi;
591}
592
593void devpts_put_ref(struct pts_fs_info *fsi)
594{
595 deactivate_super(fsi->sb);
596}
597
598/** 533/**
599 * devpts_pty_new -- create a new inode in /dev/pts/ 534 * devpts_pty_new -- create a new inode in /dev/pts/
600 * @ptmx_inode: inode of the master 535 * @ptmx_inode: inode of the master
@@ -607,16 +542,12 @@ void devpts_put_ref(struct pts_fs_info *fsi)
607struct dentry *devpts_pty_new(struct pts_fs_info *fsi, int index, void *priv) 542struct dentry *devpts_pty_new(struct pts_fs_info *fsi, int index, void *priv)
608{ 543{
609 struct dentry *dentry; 544 struct dentry *dentry;
610 struct super_block *sb; 545 struct super_block *sb = fsi->sb;
611 struct inode *inode; 546 struct inode *inode;
612 struct dentry *root; 547 struct dentry *root;
613 struct pts_mount_opts *opts; 548 struct pts_mount_opts *opts;
614 char s[12]; 549 char s[12];
615 550
616 if (!fsi)
617 return ERR_PTR(-ENODEV);
618
619 sb = fsi->sb;
620 root = sb->s_root; 551 root = sb->s_root;
621 opts = &fsi->mount_opts; 552 opts = &fsi->mount_opts;
622 553
@@ -676,20 +607,8 @@ void devpts_pty_kill(struct dentry *dentry)
676static int __init init_devpts_fs(void) 607static int __init init_devpts_fs(void)
677{ 608{
678 int err = register_filesystem(&devpts_fs_type); 609 int err = register_filesystem(&devpts_fs_type);
679 struct ctl_table_header *table;
680
681 if (!err) { 610 if (!err) {
682 struct vfsmount *mnt; 611 register_sysctl_table(pty_root_table);
683
684 table = register_sysctl_table(pty_root_table);
685 mnt = kern_mount(&devpts_fs_type);
686 if (IS_ERR(mnt)) {
687 err = PTR_ERR(mnt);
688 unregister_filesystem(&devpts_fs_type);
689 unregister_sysctl_table(table);
690 } else {
691 devpts_mnt = mnt;
692 }
693 } 612 }
694 return err; 613 return err;
695} 614}
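
devpts_acquire() replaces the old devpts_get_ref(): rather than trusting a global devpts_mnt (now gone), it resolves the instance from the opener's file, accepting the file's own superblock if it is already devpts and otherwise looking for a devpts mounted on "pts" beside it, which covers /dev/ptmx sitting next to /dev/pts. A hedged sketch of how a ptmx open path would pair the new helpers; the real caller lives in the tty layer and may differ in detail (filp and priv come from the surrounding driver):

	struct pts_fs_info *fsi;
	struct dentry *dentry;
	int index;

	fsi = devpts_acquire(filp);
	if (IS_ERR(fsi))
		return PTR_ERR(fsi);
	index = devpts_new_index(fsi);		/* may hit the pty limits */
	if (index < 0) {
		devpts_release(fsi);
		return index;
	}
	dentry = devpts_pty_new(fsi, index, priv);	/* node in /dev/pts */
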
diff --git a/fs/ecryptfs/kthread.c b/fs/ecryptfs/kthread.c
index 866bb18efefe..e818f5ac7a26 100644
--- a/fs/ecryptfs/kthread.c
+++ b/fs/ecryptfs/kthread.c
@@ -25,6 +25,7 @@
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/wait.h> 26#include <linux/wait.h>
27#include <linux/mount.h> 27#include <linux/mount.h>
28#include <linux/file.h>
28#include "ecryptfs_kernel.h" 29#include "ecryptfs_kernel.h"
29 30
30struct ecryptfs_open_req { 31struct ecryptfs_open_req {
@@ -147,7 +148,7 @@ int ecryptfs_privileged_open(struct file **lower_file,
147 flags |= IS_RDONLY(d_inode(lower_dentry)) ? O_RDONLY : O_RDWR; 148 flags |= IS_RDONLY(d_inode(lower_dentry)) ? O_RDONLY : O_RDWR;
148 (*lower_file) = dentry_open(&req.path, flags, cred); 149 (*lower_file) = dentry_open(&req.path, flags, cred);
149 if (!IS_ERR(*lower_file)) 150 if (!IS_ERR(*lower_file))
150 goto out; 151 goto have_file;
151 if ((flags & O_ACCMODE) == O_RDONLY) { 152 if ((flags & O_ACCMODE) == O_RDONLY) {
152 rc = PTR_ERR((*lower_file)); 153 rc = PTR_ERR((*lower_file));
153 goto out; 154 goto out;
@@ -165,8 +166,16 @@ int ecryptfs_privileged_open(struct file **lower_file,
165 mutex_unlock(&ecryptfs_kthread_ctl.mux); 166 mutex_unlock(&ecryptfs_kthread_ctl.mux);
166 wake_up(&ecryptfs_kthread_ctl.wait); 167 wake_up(&ecryptfs_kthread_ctl.wait);
167 wait_for_completion(&req.done); 168 wait_for_completion(&req.done);
168 if (IS_ERR(*lower_file)) 169 if (IS_ERR(*lower_file)) {
169 rc = PTR_ERR(*lower_file); 170 rc = PTR_ERR(*lower_file);
171 goto out;
172 }
173have_file:
174 if ((*lower_file)->f_op->mmap == NULL) {
175 fput(*lower_file);
176 *lower_file = NULL;
177 rc = -EMEDIUMTYPE;
178 }
170out: 179out:
171 return rc; 180 return rc;
172} 181}
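
The new have_file tail checks a required capability at the single point where both open paths converge: a lower file that cannot be mmap'ed is unusable to eCryptfs, so the open is undone with fput() and -EMEDIUMTYPE instead of failing later. A small userspace model of the shape (acquire, verify, release on failure), with invented names:

#include <stdio.h>
#include <stdlib.h>

struct ops { int (*mmap)(void); };
struct handle { const struct ops *ops; };

static struct handle *acquire(const struct ops *ops)
{
	struct handle *h = malloc(sizeof(*h));

	if (h)
		h->ops = ops;
	return h;
}

/* mirror of the ecryptfs_privileged_open() tail: verify the required
 * method right after open; undo the open on failure so callers only
 * ever see a usable handle */
static struct handle *acquire_checked(const struct ops *ops)
{
	struct handle *h = acquire(ops);

	if (h && !h->ops->mmap) {
		free(h);		/* kernel: fput(*lower_file) */
		return NULL;		/* kernel: rc = -EMEDIUMTYPE */
	}
	return h;
}

int main(void)
{
	static const struct ops no_mmap = { 0 };

	printf("%s\n", acquire_checked(&no_mmap) ? "usable" : "rejected");
	return 0;
}
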
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index 3078b679fcd1..c8c4f79c7ce1 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -887,6 +887,8 @@ void fscache_invalidate_writes(struct fscache_cookie *cookie)
887 put_page(results[i]); 887 put_page(results[i]);
888 } 888 }
889 889
890 wake_up_bit(&cookie->flags, 0);
891
890 _leave(""); 892 _leave("");
891} 893}
892 894
diff --git a/fs/internal.h b/fs/internal.h
index b71deeecea17..f57ced528cde 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -130,6 +130,7 @@ extern int invalidate_inodes(struct super_block *, bool);
130extern struct dentry *__d_alloc(struct super_block *, const struct qstr *); 130extern struct dentry *__d_alloc(struct super_block *, const struct qstr *);
131extern int d_set_mounted(struct dentry *dentry); 131extern int d_set_mounted(struct dentry *dentry);
132extern long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc); 132extern long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc);
133extern struct dentry *d_alloc_cursor(struct dentry *);
133 134
134/* 135/*
135 * read_write.c 136 * read_write.c
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index b31852f76f46..e3ca4b4cac84 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -2329,18 +2329,10 @@ void *jbd2_alloc(size_t size, gfp_t flags)
2329 2329
2330 BUG_ON(size & (size-1)); /* Must be a power of 2 */ 2330 BUG_ON(size & (size-1)); /* Must be a power of 2 */
2331 2331
2332 flags |= __GFP_REPEAT; 2332 if (size < PAGE_SIZE)
2333 if (size == PAGE_SIZE)
2334 ptr = (void *)__get_free_pages(flags, 0);
2335 else if (size > PAGE_SIZE) {
2336 int order = get_order(size);
2337
2338 if (order < 3)
2339 ptr = (void *)__get_free_pages(flags, order);
2340 else
2341 ptr = vmalloc(size);
2342 } else
2343 ptr = kmem_cache_alloc(get_slab(size), flags); 2333 ptr = kmem_cache_alloc(get_slab(size), flags);
2334 else
2335 ptr = (void *)__get_free_pages(flags, get_order(size));
2344 2336
2345 /* Check alignment; SLUB has gotten this wrong in the past, 2337 /* Check alignment; SLUB has gotten this wrong in the past,
2346 * and this can lead to user data corruption! */ 2338 * and this can lead to user data corruption! */
@@ -2351,20 +2343,10 @@ void *jbd2_alloc(size_t size, gfp_t flags)
2351 2343
2352void jbd2_free(void *ptr, size_t size) 2344void jbd2_free(void *ptr, size_t size)
2353{ 2345{
2354 if (size == PAGE_SIZE) { 2346 if (size < PAGE_SIZE)
2355 free_pages((unsigned long)ptr, 0); 2347 kmem_cache_free(get_slab(size), ptr);
2356 return; 2348 else
2357 } 2349 free_pages((unsigned long)ptr, get_order(size));
2358 if (size > PAGE_SIZE) {
2359 int order = get_order(size);
2360
2361 if (order < 3)
2362 free_pages((unsigned long)ptr, order);
2363 else
2364 vfree(ptr);
2365 return;
2366 }
2367 kmem_cache_free(get_slab(size), ptr);
2368}; 2350};
2369 2351
2370/* 2352/*
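
The jbd2_alloc()/jbd2_free() rewrite collapses three size classes into two and drops vmalloc: journal buffer sizes are power-of-two block sizes, so anything below PAGE_SIZE comes from a slab and everything else from the page allocator, with alloc and free dispatching on the same boundary. A runnable userspace model, with get_order() reimplemented and malloc()/aligned_alloc() standing in for the slab and page allocators:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096u

/* smallest order such that (PAGE_SIZE << order) >= size */
static unsigned int get_order(size_t size)
{
	unsigned int order = 0;
	size_t span = PAGE_SIZE;

	while (span < size) {
		span <<= 1;
		order++;
	}
	return order;
}

/* sub-page sizes from a slab (malloc here), everything else from
 * whole pages (aligned_alloc here) */
static void *buf_alloc(size_t size)
{
	if (size < PAGE_SIZE)
		return malloc(size);
	return aligned_alloc(PAGE_SIZE, (size_t)PAGE_SIZE << get_order(size));
}

static void buf_free(void *ptr, size_t size)
{
	(void)size;	/* the kernel needs it to pick the matching allocator */
	free(ptr);
}

int main(void)
{
	void *a = buf_alloc(512);	/* slab class */
	void *b = buf_alloc(8192);	/* order-1 pages */

	printf("8192 bytes -> order %u\n", get_order(8192));
	buf_free(a, 512);
	buf_free(b, 8192);
	return 0;
}
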
diff --git a/fs/libfs.c b/fs/libfs.c
index 3db2721144c2..cedeacbae303 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -71,9 +71,7 @@ EXPORT_SYMBOL(simple_lookup);
71 71
72int dcache_dir_open(struct inode *inode, struct file *file) 72int dcache_dir_open(struct inode *inode, struct file *file)
73{ 73{
74 static struct qstr cursor_name = QSTR_INIT(".", 1); 74 file->private_data = d_alloc_cursor(file->f_path.dentry);
75
76 file->private_data = d_alloc(file->f_path.dentry, &cursor_name);
77 75
78 return file->private_data ? 0 : -ENOMEM; 76 return file->private_data ? 0 : -ENOMEM;
79} 77}
diff --git a/fs/namei.c b/fs/namei.c
index 4c4f95ac8aa5..70580ab1445c 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1416,21 +1416,28 @@ static void follow_mount(struct path *path)
1416 } 1416 }
1417} 1417}
1418 1418
1419static int path_parent_directory(struct path *path)
1420{
1421 struct dentry *old = path->dentry;
1422 /* rare case of legitimate dget_parent()... */
1423 path->dentry = dget_parent(path->dentry);
1424 dput(old);
1425 if (unlikely(!path_connected(path)))
1426 return -ENOENT;
1427 return 0;
1428}
1429
1419static int follow_dotdot(struct nameidata *nd) 1430static int follow_dotdot(struct nameidata *nd)
1420{ 1431{
1421 while(1) { 1432 while(1) {
1422 struct dentry *old = nd->path.dentry;
1423
1424 if (nd->path.dentry == nd->root.dentry && 1433 if (nd->path.dentry == nd->root.dentry &&
1425 nd->path.mnt == nd->root.mnt) { 1434 nd->path.mnt == nd->root.mnt) {
1426 break; 1435 break;
1427 } 1436 }
1428 if (nd->path.dentry != nd->path.mnt->mnt_root) { 1437 if (nd->path.dentry != nd->path.mnt->mnt_root) {
1429 /* rare case of legitimate dget_parent()... */ 1438 int ret = path_parent_directory(&nd->path);
1430 nd->path.dentry = dget_parent(nd->path.dentry); 1439 if (ret)
1431 dput(old); 1440 return ret;
1432 if (unlikely(!path_connected(&nd->path)))
1433 return -ENOENT;
1434 break; 1441 break;
1435 } 1442 }
1436 if (!follow_up(&nd->path)) 1443 if (!follow_up(&nd->path))
@@ -2514,6 +2521,34 @@ struct dentry *lookup_one_len_unlocked(const char *name,
2514} 2521}
2515EXPORT_SYMBOL(lookup_one_len_unlocked); 2522EXPORT_SYMBOL(lookup_one_len_unlocked);
2516 2523
2524#ifdef CONFIG_UNIX98_PTYS
2525int path_pts(struct path *path)
2526{
2527 /* Find something mounted on "pts" in the same directory as
2528 * the input path.
2529 */
2530 struct dentry *child, *parent;
2531 struct qstr this;
2532 int ret;
2533
2534 ret = path_parent_directory(path);
2535 if (ret)
2536 return ret;
2537
2538 parent = path->dentry;
2539 this.name = "pts";
2540 this.len = 3;
2541 child = d_hash_and_lookup(parent, &this);
2542 if (!child)
2543 return -ENOENT;
2544
2545 path->dentry = child;
2546 dput(parent);
2547 follow_mount(path);
2548 return 0;
2549}
2550#endif
2551
2517int user_path_at_empty(int dfd, const char __user *name, unsigned flags, 2552int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
2518 struct path *path, int *empty) 2553 struct path *path, int *empty)
2519{ 2554{
@@ -2995,9 +3030,13 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry,
2995 } 3030 }
2996 if (*opened & FILE_CREATED) 3031 if (*opened & FILE_CREATED)
2997 fsnotify_create(dir, dentry); 3032 fsnotify_create(dir, dentry);
2998 path->dentry = dentry; 3033 if (unlikely(d_is_negative(dentry))) {
2999 path->mnt = nd->path.mnt; 3034 error = -ENOENT;
3000 return 1; 3035 } else {
3036 path->dentry = dentry;
3037 path->mnt = nd->path.mnt;
3038 return 1;
3039 }
3001 } 3040 }
3002 } 3041 }
3003 dput(dentry); 3042 dput(dentry);
@@ -3166,9 +3205,7 @@ static int do_last(struct nameidata *nd,
3166 int acc_mode = op->acc_mode; 3205 int acc_mode = op->acc_mode;
3167 unsigned seq; 3206 unsigned seq;
3168 struct inode *inode; 3207 struct inode *inode;
3169 struct path save_parent = { .dentry = NULL, .mnt = NULL };
3170 struct path path; 3208 struct path path;
3171 bool retried = false;
3172 int error; 3209 int error;
3173 3210
3174 nd->flags &= ~LOOKUP_PARENT; 3211 nd->flags &= ~LOOKUP_PARENT;
@@ -3211,7 +3248,6 @@ static int do_last(struct nameidata *nd,
3211 return -EISDIR; 3248 return -EISDIR;
3212 } 3249 }
3213 3250
3214retry_lookup:
3215 if (open_flag & (O_CREAT | O_TRUNC | O_WRONLY | O_RDWR)) { 3251 if (open_flag & (O_CREAT | O_TRUNC | O_WRONLY | O_RDWR)) {
3216 error = mnt_want_write(nd->path.mnt); 3252 error = mnt_want_write(nd->path.mnt);
3217 if (!error) 3253 if (!error)
@@ -3263,6 +3299,10 @@ retry_lookup:
3263 got_write = false; 3299 got_write = false;
3264 } 3300 }
3265 3301
3302 error = follow_managed(&path, nd);
3303 if (unlikely(error < 0))
3304 return error;
3305
3266 if (unlikely(d_is_negative(path.dentry))) { 3306 if (unlikely(d_is_negative(path.dentry))) {
3267 path_to_nameidata(&path, nd); 3307 path_to_nameidata(&path, nd);
3268 return -ENOENT; 3308 return -ENOENT;
@@ -3278,10 +3318,6 @@ retry_lookup:
3278 return -EEXIST; 3318 return -EEXIST;
3279 } 3319 }
3280 3320
3281 error = follow_managed(&path, nd);
3282 if (unlikely(error < 0))
3283 return error;
3284
3285 seq = 0; /* out of RCU mode, so the value doesn't matter */ 3321 seq = 0; /* out of RCU mode, so the value doesn't matter */
3286 inode = d_backing_inode(path.dentry); 3322 inode = d_backing_inode(path.dentry);
3287finish_lookup: 3323finish_lookup:
@@ -3292,23 +3328,14 @@ finish_lookup:
3292 if (unlikely(error)) 3328 if (unlikely(error))
3293 return error; 3329 return error;
3294 3330
3295 if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path.mnt) { 3331 path_to_nameidata(&path, nd);
3296 path_to_nameidata(&path, nd);
3297 } else {
3298 save_parent.dentry = nd->path.dentry;
3299 save_parent.mnt = mntget(path.mnt);
3300 nd->path.dentry = path.dentry;
3301
3302 }
3303 nd->inode = inode; 3332 nd->inode = inode;
3304 nd->seq = seq; 3333 nd->seq = seq;
3305 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */ 3334 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
3306finish_open: 3335finish_open:
3307 error = complete_walk(nd); 3336 error = complete_walk(nd);
3308 if (error) { 3337 if (error)
3309 path_put(&save_parent);
3310 return error; 3338 return error;
3311 }
3312 audit_inode(nd->name, nd->path.dentry, 0); 3339 audit_inode(nd->name, nd->path.dentry, 0);
3313 error = -EISDIR; 3340 error = -EISDIR;
3314 if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry)) 3341 if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
@@ -3331,13 +3358,9 @@ finish_open_created:
3331 goto out; 3358 goto out;
3332 BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */ 3359 BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */
3333 error = vfs_open(&nd->path, file, current_cred()); 3360 error = vfs_open(&nd->path, file, current_cred());
3334 if (!error) { 3361 if (error)
3335 *opened |= FILE_OPENED;
3336 } else {
3337 if (error == -EOPENSTALE)
3338 goto stale_open;
3339 goto out; 3362 goto out;
3340 } 3363 *opened |= FILE_OPENED;
3341opened: 3364opened:
3342 error = open_check_o_direct(file); 3365 error = open_check_o_direct(file);
3343 if (!error) 3366 if (!error)
@@ -3353,26 +3376,7 @@ out:
3353 } 3376 }
3354 if (got_write) 3377 if (got_write)
3355 mnt_drop_write(nd->path.mnt); 3378 mnt_drop_write(nd->path.mnt);
3356 path_put(&save_parent);
3357 return error; 3379 return error;
3358
3359stale_open:
3360 /* If no saved parent or already retried then can't retry */
3361 if (!save_parent.dentry || retried)
3362 goto out;
3363
3364 BUG_ON(save_parent.dentry != dir);
3365 path_put(&nd->path);
3366 nd->path = save_parent;
3367 nd->inode = dir->d_inode;
3368 save_parent.mnt = NULL;
3369 save_parent.dentry = NULL;
3370 if (got_write) {
3371 mnt_drop_write(nd->path.mnt);
3372 got_write = false;
3373 }
3374 retried = true;
3375 goto retry_lookup;
3376} 3380}
3377 3381
3378static int do_tmpfile(struct nameidata *nd, unsigned flags, 3382static int do_tmpfile(struct nameidata *nd, unsigned flags,
diff --git a/fs/namespace.c b/fs/namespace.c
index 4fb1691b4355..783004af5707 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -2409,8 +2409,10 @@ static int do_new_mount(struct path *path, const char *fstype, int flags,
2409 mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV; 2409 mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV;
2410 } 2410 }
2411 if (type->fs_flags & FS_USERNS_VISIBLE) { 2411 if (type->fs_flags & FS_USERNS_VISIBLE) {
2412 if (!fs_fully_visible(type, &mnt_flags)) 2412 if (!fs_fully_visible(type, &mnt_flags)) {
2413 put_filesystem(type);
2413 return -EPERM; 2414 return -EPERM;
2415 }
2414 } 2416 }
2415 } 2417 }
2416 2418
@@ -3245,6 +3247,10 @@ static bool fs_fully_visible(struct file_system_type *type, int *new_mnt_flags)
3245 if (mnt->mnt.mnt_sb->s_iflags & SB_I_NOEXEC) 3247 if (mnt->mnt.mnt_sb->s_iflags & SB_I_NOEXEC)
3246 mnt_flags &= ~(MNT_LOCK_NOSUID | MNT_LOCK_NOEXEC); 3248 mnt_flags &= ~(MNT_LOCK_NOSUID | MNT_LOCK_NOEXEC);
3247 3249
3250 /* Don't miss readonly hidden in the superblock flags */
3251 if (mnt->mnt.mnt_sb->s_flags & MS_RDONLY)
3252 mnt_flags |= MNT_LOCK_READONLY;
3253
3248 /* Verify the mount flags are equal to or more permissive 3254 /* Verify the mount flags are equal to or more permissive
3249 * than the proposed new mount. 3255 * than the proposed new mount.
3250 */ 3256 */
@@ -3271,7 +3277,7 @@ static bool fs_fully_visible(struct file_system_type *type, int *new_mnt_flags)
3271 list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) { 3277 list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
3272 struct inode *inode = child->mnt_mountpoint->d_inode; 3278 struct inode *inode = child->mnt_mountpoint->d_inode;
3273 /* Only worry about locked mounts */ 3279 /* Only worry about locked mounts */
3274 if (!(mnt_flags & MNT_LOCKED)) 3280 if (!(child->mnt.mnt_flags & MNT_LOCKED))
3275 continue; 3281 continue;
3276 /* Is the directory permanently empty? */ 3282 /* Is the directory permanently empty? */
3277 if (!is_empty_dir_inode(inode)) 3283 if (!is_empty_dir_inode(inode))
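
Two separate fixes in fs_fully_visible(): the child-mount loop was testing the parent's flags instead of each child's, and a mount can be read-only purely at the superblock level, so MS_RDONLY has to be folded into the lock flags being compared, or a user namespace could mount a read-write instance over a read-only one. The folding step, as a tiny standalone model:

#include <stdio.h>

#define MNT_LOCK_READONLY 0x1u
#define MS_RDONLY         0x2u	/* superblock flag, a separate flag space */

struct mnt { unsigned int mnt_flags, sb_flags; };

/* effective lock flags: per-mount flags plus read-only state hidden
 * in the superblock, as in the fs_fully_visible() fix */
static unsigned int effective_lock_flags(const struct mnt *m)
{
	unsigned int flags = m->mnt_flags;

	if (m->sb_flags & MS_RDONLY)
		flags |= MNT_LOCK_READONLY;
	return flags;
}

int main(void)
{
	struct mnt m = { .mnt_flags = 0, .sb_flags = MS_RDONLY };

	printf("locked read-only: %s\n",
	       effective_lock_flags(&m) & MNT_LOCK_READONLY ? "yes" : "no");
	return 0;
}
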
diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
index e55b5242614d..31f3df193bdb 100644
--- a/fs/nfsd/blocklayout.c
+++ b/fs/nfsd/blocklayout.c
@@ -290,7 +290,7 @@ out_free_buf:
290 return error; 290 return error;
291} 291}
292 292
293#define NFSD_MDS_PR_KEY 0x0100000000000000 293#define NFSD_MDS_PR_KEY 0x0100000000000000ULL
294 294
295/* 295/*
296 * We use the client ID as a unique key for the reservations. 296 * We use the client ID as a unique key for the reservations.
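
A one-character fix with a real type effect: an unsuffixed hexadecimal constant takes the first integer type that can represent it, which for 0x0100000000000000 is a signed 64-bit type on common ABIs. The ULL suffix pins it to unsigned long long, so arithmetic against u64 client IDs is unsigned on every target and tools stop warning about the constant's type. For instance:

#include <stdio.h>

#define NFSD_MDS_PR_KEY 0x0100000000000000ULL

int main(void)
{
	unsigned long long clientid = 42;

	/* with the suffix the constant is unsigned long long everywhere;
	 * without it, it would be a signed 64-bit constant */
	printf("key|id = %#llx\n", NFSD_MDS_PR_KEY | clientid);
	return 0;
}
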
diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c
index 1580ea6fd64d..d08cd88155c7 100644
--- a/fs/nfsd/nfs2acl.c
+++ b/fs/nfsd/nfs2acl.c
@@ -104,22 +104,21 @@ static __be32 nfsacld_proc_setacl(struct svc_rqst * rqstp,
104 goto out; 104 goto out;
105 105
106 inode = d_inode(fh->fh_dentry); 106 inode = d_inode(fh->fh_dentry);
107 if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) {
108 error = -EOPNOTSUPP;
109 goto out_errno;
110 }
111 107
112 error = fh_want_write(fh); 108 error = fh_want_write(fh);
113 if (error) 109 if (error)
114 goto out_errno; 110 goto out_errno;
115 111
116 error = inode->i_op->set_acl(inode, argp->acl_access, ACL_TYPE_ACCESS); 112 fh_lock(fh);
113
114 error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access);
117 if (error) 115 if (error)
118 goto out_drop_write; 116 goto out_drop_lock;
119 error = inode->i_op->set_acl(inode, argp->acl_default, 117 error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default);
120 ACL_TYPE_DEFAULT);
121 if (error) 118 if (error)
122 goto out_drop_write; 119 goto out_drop_lock;
120
121 fh_unlock(fh);
123 122
124 fh_drop_write(fh); 123 fh_drop_write(fh);
125 124
@@ -131,7 +130,8 @@ out:
131 posix_acl_release(argp->acl_access); 130 posix_acl_release(argp->acl_access);
132 posix_acl_release(argp->acl_default); 131 posix_acl_release(argp->acl_default);
133 return nfserr; 132 return nfserr;
134out_drop_write: 133out_drop_lock:
134 fh_unlock(fh);
135 fh_drop_write(fh); 135 fh_drop_write(fh);
136out_errno: 136out_errno:
137 nfserr = nfserrno(error); 137 nfserr = nfserrno(error);
diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c
index 01df4cd7c753..0c890347cde3 100644
--- a/fs/nfsd/nfs3acl.c
+++ b/fs/nfsd/nfs3acl.c
@@ -95,22 +95,20 @@ static __be32 nfsd3_proc_setacl(struct svc_rqst * rqstp,
95 goto out; 95 goto out;
96 96
97 inode = d_inode(fh->fh_dentry); 97 inode = d_inode(fh->fh_dentry);
98 if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) {
99 error = -EOPNOTSUPP;
100 goto out_errno;
101 }
102 98
103 error = fh_want_write(fh); 99 error = fh_want_write(fh);
104 if (error) 100 if (error)
105 goto out_errno; 101 goto out_errno;
106 102
107 error = inode->i_op->set_acl(inode, argp->acl_access, ACL_TYPE_ACCESS); 103 fh_lock(fh);
104
105 error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access);
108 if (error) 106 if (error)
109 goto out_drop_write; 107 goto out_drop_lock;
110 error = inode->i_op->set_acl(inode, argp->acl_default, 108 error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default);
111 ACL_TYPE_DEFAULT);
112 109
113out_drop_write: 110out_drop_lock:
111 fh_unlock(fh);
114 fh_drop_write(fh); 112 fh_drop_write(fh);
115out_errno: 113out_errno:
116 nfserr = nfserrno(error); 114 nfserr = nfserrno(error);
diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
index 6adabd6049b7..71292a0d6f09 100644
--- a/fs/nfsd/nfs4acl.c
+++ b/fs/nfsd/nfs4acl.c
@@ -770,9 +770,6 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
770 dentry = fhp->fh_dentry; 770 dentry = fhp->fh_dentry;
771 inode = d_inode(dentry); 771 inode = d_inode(dentry);
772 772
773 if (!inode->i_op->set_acl || !IS_POSIXACL(inode))
774 return nfserr_attrnotsupp;
775
776 if (S_ISDIR(inode->i_mode)) 773 if (S_ISDIR(inode->i_mode))
777 flags = NFS4_ACL_DIR; 774 flags = NFS4_ACL_DIR;
778 775
@@ -782,16 +779,19 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
782 if (host_error < 0) 779 if (host_error < 0)
783 goto out_nfserr; 780 goto out_nfserr;
784 781
785 host_error = inode->i_op->set_acl(inode, pacl, ACL_TYPE_ACCESS); 782 fh_lock(fhp);
783
784 host_error = set_posix_acl(inode, ACL_TYPE_ACCESS, pacl);
786 if (host_error < 0) 785 if (host_error < 0)
787 goto out_release; 786 goto out_drop_lock;
788 787
789 if (S_ISDIR(inode->i_mode)) { 788 if (S_ISDIR(inode->i_mode)) {
790 host_error = inode->i_op->set_acl(inode, dpacl, 789 host_error = set_posix_acl(inode, ACL_TYPE_DEFAULT, dpacl);
791 ACL_TYPE_DEFAULT);
792 } 790 }
793 791
794out_release: 792out_drop_lock:
793 fh_unlock(fhp);
794
795 posix_acl_release(pacl); 795 posix_acl_release(pacl);
796 posix_acl_release(dpacl); 796 posix_acl_release(dpacl);
797out_nfserr: 797out_nfserr:
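
The same conversion lands in all three nfsd ACL frontends (v2, v3 and v4): instead of calling inode->i_op->set_acl directly and open-coding the IS_POSIXACL checks, each now takes the filehandle lock and goes through the new set_posix_acl() helper, and the error labels move from out_drop_write to out_drop_lock so the lock is dropped on every path. The common shape, as a kernel-style sketch:

	fh_lock(fh);
	error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access);
	if (!error)
		error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default);
	fh_unlock(fh);
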
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 7389cb1d7409..04c68d900324 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -710,22 +710,6 @@ static struct rpc_cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc
710 } 710 }
711} 711}
712 712
713static struct rpc_clnt *create_backchannel_client(struct rpc_create_args *args)
714{
715 struct rpc_xprt *xprt;
716
717 if (args->protocol != XPRT_TRANSPORT_BC_TCP)
718 return rpc_create(args);
719
720 xprt = args->bc_xprt->xpt_bc_xprt;
721 if (xprt) {
722 xprt_get(xprt);
723 return rpc_create_xprt(args, xprt);
724 }
725
726 return rpc_create(args);
727}
728
729static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses) 713static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses)
730{ 714{
731 int maxtime = max_cb_time(clp->net); 715 int maxtime = max_cb_time(clp->net);
@@ -768,7 +752,7 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
768 args.authflavor = ses->se_cb_sec.flavor; 752 args.authflavor = ses->se_cb_sec.flavor;
769 } 753 }
770 /* Create RPC client */ 754 /* Create RPC client */
771 client = create_backchannel_client(&args); 755 client = rpc_create(&args);
772 if (IS_ERR(client)) { 756 if (IS_ERR(client)) {
773 dprintk("NFSD: couldn't create callback client: %ld\n", 757 dprintk("NFSD: couldn't create callback client: %ld\n",
774 PTR_ERR(client)); 758 PTR_ERR(client));
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index f5f82e145018..70d0b9b33031 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -3480,12 +3480,17 @@ alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
3480} 3480}
3481 3481
3482static struct nfs4_ol_stateid * 3482static struct nfs4_ol_stateid *
3483init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, 3483init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
3484 struct nfsd4_open *open)
3485{ 3484{
3486 3485
3487 struct nfs4_openowner *oo = open->op_openowner; 3486 struct nfs4_openowner *oo = open->op_openowner;
3488 struct nfs4_ol_stateid *retstp = NULL; 3487 struct nfs4_ol_stateid *retstp = NULL;
3488 struct nfs4_ol_stateid *stp;
3489
3490 stp = open->op_stp;
3491 /* We are moving these outside of the spinlocks to avoid the warnings */
3492 mutex_init(&stp->st_mutex);
3493 mutex_lock(&stp->st_mutex);
3489 3494
3490 spin_lock(&oo->oo_owner.so_client->cl_lock); 3495 spin_lock(&oo->oo_owner.so_client->cl_lock);
3491 spin_lock(&fp->fi_lock); 3496 spin_lock(&fp->fi_lock);
@@ -3493,6 +3498,8 @@ init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
3493 retstp = nfsd4_find_existing_open(fp, open); 3498 retstp = nfsd4_find_existing_open(fp, open);
3494 if (retstp) 3499 if (retstp)
3495 goto out_unlock; 3500 goto out_unlock;
3501
3502 open->op_stp = NULL;
3496 atomic_inc(&stp->st_stid.sc_count); 3503 atomic_inc(&stp->st_stid.sc_count);
3497 stp->st_stid.sc_type = NFS4_OPEN_STID; 3504 stp->st_stid.sc_type = NFS4_OPEN_STID;
3498 INIT_LIST_HEAD(&stp->st_locks); 3505 INIT_LIST_HEAD(&stp->st_locks);
@@ -3502,14 +3509,19 @@ init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
3502 stp->st_access_bmap = 0; 3509 stp->st_access_bmap = 0;
3503 stp->st_deny_bmap = 0; 3510 stp->st_deny_bmap = 0;
3504 stp->st_openstp = NULL; 3511 stp->st_openstp = NULL;
3505 init_rwsem(&stp->st_rwsem);
3506 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids); 3512 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
3507 list_add(&stp->st_perfile, &fp->fi_stateids); 3513 list_add(&stp->st_perfile, &fp->fi_stateids);
3508 3514
3509out_unlock: 3515out_unlock:
3510 spin_unlock(&fp->fi_lock); 3516 spin_unlock(&fp->fi_lock);
3511 spin_unlock(&oo->oo_owner.so_client->cl_lock); 3517 spin_unlock(&oo->oo_owner.so_client->cl_lock);
3512 return retstp; 3518 if (retstp) {
3519 mutex_lock(&retstp->st_mutex);
3520 /* To keep mutex tracking happy */
3521 mutex_unlock(&stp->st_mutex);
3522 stp = retstp;
3523 }
3524 return stp;
3513} 3525}
3514 3526
3515/* 3527/*
@@ -4305,7 +4317,6 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
4305 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client; 4317 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
4306 struct nfs4_file *fp = NULL; 4318 struct nfs4_file *fp = NULL;
4307 struct nfs4_ol_stateid *stp = NULL; 4319 struct nfs4_ol_stateid *stp = NULL;
4308 struct nfs4_ol_stateid *swapstp = NULL;
4309 struct nfs4_delegation *dp = NULL; 4320 struct nfs4_delegation *dp = NULL;
4310 __be32 status; 4321 __be32 status;
4311 4322
@@ -4335,32 +4346,28 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
4335 */ 4346 */
4336 if (stp) { 4347 if (stp) {
4337 /* Stateid was found, this is an OPEN upgrade */ 4348 /* Stateid was found, this is an OPEN upgrade */
4338 down_read(&stp->st_rwsem); 4349 mutex_lock(&stp->st_mutex);
4339 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open); 4350 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
4340 if (status) { 4351 if (status) {
4341 up_read(&stp->st_rwsem); 4352 mutex_unlock(&stp->st_mutex);
4342 goto out; 4353 goto out;
4343 } 4354 }
4344 } else { 4355 } else {
4345 stp = open->op_stp; 4356 /* stp is returned locked. */
4346 open->op_stp = NULL; 4357 stp = init_open_stateid(fp, open);
4347 swapstp = init_open_stateid(stp, fp, open); 4358 /* See if we lost the race to some other thread */
4348 if (swapstp) { 4359 if (stp->st_access_bmap != 0) {
4349 nfs4_put_stid(&stp->st_stid);
4350 stp = swapstp;
4351 down_read(&stp->st_rwsem);
4352 status = nfs4_upgrade_open(rqstp, fp, current_fh, 4360 status = nfs4_upgrade_open(rqstp, fp, current_fh,
4353 stp, open); 4361 stp, open);
4354 if (status) { 4362 if (status) {
4355 up_read(&stp->st_rwsem); 4363 mutex_unlock(&stp->st_mutex);
4356 goto out; 4364 goto out;
4357 } 4365 }
4358 goto upgrade_out; 4366 goto upgrade_out;
4359 } 4367 }
4360 down_read(&stp->st_rwsem);
4361 status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open); 4368 status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
4362 if (status) { 4369 if (status) {
4363 up_read(&stp->st_rwsem); 4370 mutex_unlock(&stp->st_mutex);
4364 release_open_stateid(stp); 4371 release_open_stateid(stp);
4365 goto out; 4372 goto out;
4366 } 4373 }
@@ -4372,7 +4379,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
4372 } 4379 }
4373upgrade_out: 4380upgrade_out:
4374 nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid); 4381 nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
4375 up_read(&stp->st_rwsem); 4382 mutex_unlock(&stp->st_mutex);
4376 4383
4377 if (nfsd4_has_session(&resp->cstate)) { 4384 if (nfsd4_has_session(&resp->cstate)) {
4378 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) { 4385 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
@@ -4977,12 +4984,12 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
4977 * revoked delegations are kept only for free_stateid. 4984 * revoked delegations are kept only for free_stateid.
4978 */ 4985 */
4979 return nfserr_bad_stateid; 4986 return nfserr_bad_stateid;
4980 down_write(&stp->st_rwsem); 4987 mutex_lock(&stp->st_mutex);
4981 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate)); 4988 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
4982 if (status == nfs_ok) 4989 if (status == nfs_ok)
4983 status = nfs4_check_fh(current_fh, &stp->st_stid); 4990 status = nfs4_check_fh(current_fh, &stp->st_stid);
4984 if (status != nfs_ok) 4991 if (status != nfs_ok)
4985 up_write(&stp->st_rwsem); 4992 mutex_unlock(&stp->st_mutex);
4986 return status; 4993 return status;
4987} 4994}
4988 4995
@@ -5030,7 +5037,7 @@ static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cs
5030 return status; 5037 return status;
5031 oo = openowner(stp->st_stateowner); 5038 oo = openowner(stp->st_stateowner);
5032 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) { 5039 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
5033 up_write(&stp->st_rwsem); 5040 mutex_unlock(&stp->st_mutex);
5034 nfs4_put_stid(&stp->st_stid); 5041 nfs4_put_stid(&stp->st_stid);
5035 return nfserr_bad_stateid; 5042 return nfserr_bad_stateid;
5036 } 5043 }
@@ -5062,12 +5069,12 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5062 oo = openowner(stp->st_stateowner); 5069 oo = openowner(stp->st_stateowner);
5063 status = nfserr_bad_stateid; 5070 status = nfserr_bad_stateid;
5064 if (oo->oo_flags & NFS4_OO_CONFIRMED) { 5071 if (oo->oo_flags & NFS4_OO_CONFIRMED) {
5065 up_write(&stp->st_rwsem); 5072 mutex_unlock(&stp->st_mutex);
5066 goto put_stateid; 5073 goto put_stateid;
5067 } 5074 }
5068 oo->oo_flags |= NFS4_OO_CONFIRMED; 5075 oo->oo_flags |= NFS4_OO_CONFIRMED;
5069 nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid); 5076 nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
5070 up_write(&stp->st_rwsem); 5077 mutex_unlock(&stp->st_mutex);
5071 dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n", 5078 dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
5072 __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid)); 5079 __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
5073 5080
@@ -5143,7 +5150,7 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp,
5143 nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid); 5150 nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
5144 status = nfs_ok; 5151 status = nfs_ok;
5145put_stateid: 5152put_stateid:
5146 up_write(&stp->st_rwsem); 5153 mutex_unlock(&stp->st_mutex);
5147 nfs4_put_stid(&stp->st_stid); 5154 nfs4_put_stid(&stp->st_stid);
5148out: 5155out:
5149 nfsd4_bump_seqid(cstate, status); 5156 nfsd4_bump_seqid(cstate, status);
@@ -5196,7 +5203,7 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5196 if (status) 5203 if (status)
5197 goto out; 5204 goto out;
5198 nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid); 5205 nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
5199 up_write(&stp->st_rwsem); 5206 mutex_unlock(&stp->st_mutex);
5200 5207
5201 nfsd4_close_open_stateid(stp); 5208 nfsd4_close_open_stateid(stp);
5202 5209
@@ -5422,7 +5429,7 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
5422 stp->st_access_bmap = 0; 5429 stp->st_access_bmap = 0;
5423 stp->st_deny_bmap = open_stp->st_deny_bmap; 5430 stp->st_deny_bmap = open_stp->st_deny_bmap;
5424 stp->st_openstp = open_stp; 5431 stp->st_openstp = open_stp;
5425 init_rwsem(&stp->st_rwsem); 5432 mutex_init(&stp->st_mutex);
5426 list_add(&stp->st_locks, &open_stp->st_locks); 5433 list_add(&stp->st_locks, &open_stp->st_locks);
5427 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids); 5434 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
5428 spin_lock(&fp->fi_lock); 5435 spin_lock(&fp->fi_lock);
@@ -5591,7 +5598,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5591 &open_stp, nn); 5598 &open_stp, nn);
5592 if (status) 5599 if (status)
5593 goto out; 5600 goto out;
5594 up_write(&open_stp->st_rwsem); 5601 mutex_unlock(&open_stp->st_mutex);
5595 open_sop = openowner(open_stp->st_stateowner); 5602 open_sop = openowner(open_stp->st_stateowner);
5596 status = nfserr_bad_stateid; 5603 status = nfserr_bad_stateid;
5597 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid, 5604 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
@@ -5600,7 +5607,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5600 status = lookup_or_create_lock_state(cstate, open_stp, lock, 5607 status = lookup_or_create_lock_state(cstate, open_stp, lock,
5601 &lock_stp, &new); 5608 &lock_stp, &new);
5602 if (status == nfs_ok) 5609 if (status == nfs_ok)
5603 down_write(&lock_stp->st_rwsem); 5610 mutex_lock(&lock_stp->st_mutex);
5604 } else { 5611 } else {
5605 status = nfs4_preprocess_seqid_op(cstate, 5612 status = nfs4_preprocess_seqid_op(cstate,
5606 lock->lk_old_lock_seqid, 5613 lock->lk_old_lock_seqid,
@@ -5704,7 +5711,7 @@ out:
5704 seqid_mutating_err(ntohl(status))) 5711 seqid_mutating_err(ntohl(status)))
5705 lock_sop->lo_owner.so_seqid++; 5712 lock_sop->lo_owner.so_seqid++;
5706 5713
5707 up_write(&lock_stp->st_rwsem); 5714 mutex_unlock(&lock_stp->st_mutex);
5708 5715
5709 /* 5716 /*
5710 * If this is a new, never-before-used stateid, and we are 5717 * If this is a new, never-before-used stateid, and we are
@@ -5874,7 +5881,7 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5874fput: 5881fput:
5875 fput(filp); 5882 fput(filp);
5876put_stateid: 5883put_stateid:
5877 up_write(&stp->st_rwsem); 5884 mutex_unlock(&stp->st_mutex);
5878 nfs4_put_stid(&stp->st_stid); 5885 nfs4_put_stid(&stp->st_stid);
5879out: 5886out:
5880 nfsd4_bump_seqid(cstate, status); 5887 nfsd4_bump_seqid(cstate, status);
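
The st_rwsem to st_mutex switch comes with a contract change: init_open_stateid() now consumes open->op_stp itself, initializes and takes st_mutex before the spinlocks (hence the comment about warnings: a sleeping lock must not be initialized or acquired under a spin_lock), and always returns a locked stateid whether it installed a fresh one or found an existing open. A caller-side sketch, following nfsd4_process_open2() above:

	stp = init_open_stateid(fp, open);	/* returned with st_mutex held */
	if (stp->st_access_bmap != 0)		/* raced: treat as OPEN upgrade */
		status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
	else
		status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
	/* ... */
	mutex_unlock(&stp->st_mutex);
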
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 986e51e5ceac..64053eadeb81 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -535,7 +535,7 @@ struct nfs4_ol_stateid {
535 unsigned char st_access_bmap; 535 unsigned char st_access_bmap;
536 unsigned char st_deny_bmap; 536 unsigned char st_deny_bmap;
537 struct nfs4_ol_stateid *st_openstp; 537 struct nfs4_ol_stateid *st_openstp;
538 struct rw_semaphore st_rwsem; 538 struct mutex st_mutex;
539}; 539};
540 540
541static inline struct nfs4_ol_stateid *openlockstateid(struct nfs4_stid *s) 541static inline struct nfs4_ol_stateid *openlockstateid(struct nfs4_stid *s)
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index 809bd2de7ad0..e9fd241b9a0a 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -439,7 +439,7 @@ static int nilfs_valid_sb(struct nilfs_super_block *sbp)
439 if (!sbp || le16_to_cpu(sbp->s_magic) != NILFS_SUPER_MAGIC) 439 if (!sbp || le16_to_cpu(sbp->s_magic) != NILFS_SUPER_MAGIC)
440 return 0; 440 return 0;
441 bytes = le16_to_cpu(sbp->s_bytes); 441 bytes = le16_to_cpu(sbp->s_bytes);
442 if (bytes > BLOCK_SIZE) 442 if (bytes < sumoff + 4 || bytes > BLOCK_SIZE)
443 return 0; 443 return 0;
444 crc = crc32_le(le32_to_cpu(sbp->s_crc_seed), (unsigned char *)sbp, 444 crc = crc32_le(le32_to_cpu(sbp->s_crc_seed), (unsigned char *)sbp,
445 sumoff); 445 sumoff);
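
A classic missing lower bound: s_bytes is read from disk, and the full validation routine goes on to checksum the region after the CRC field, a length derived by subtracting sumoff + 4 from bytes; a crafted value below that bound would make such a subtraction wrap and produce a huge read. Clamping an untrusted on-disk length to the minimum the computation needs, modeled standalone:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BLOCK_SIZE 1024u

/* an on-disk length must cover at least the checksummed prefix plus
 * the 4-byte CRC field before anything is computed from it; sumoff
 * stands in for the checksum's offset within the superblock */
static bool length_valid(uint16_t bytes, size_t sumoff)
{
	return bytes >= sumoff + 4 && bytes <= BLOCK_SIZE;
}

int main(void)
{
	printf("%d %d %d\n",
	       length_valid(3, 8),		/* below the floor: 0 */
	       length_valid(64, 8),		/* in range:        1 */
	       length_valid(4096, 8));		/* above the block: 0 */
	return 0;
}
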
diff --git a/fs/ocfs2/Makefile b/fs/ocfs2/Makefile
index e27e6527912b..4342c7ee7d20 100644
--- a/fs/ocfs2/Makefile
+++ b/fs/ocfs2/Makefile
@@ -1,7 +1,5 @@
1ccflags-y := -Ifs/ocfs2 1ccflags-y := -Ifs/ocfs2
2 2
3ccflags-y += -DCATCH_BH_JBD_RACES
4
5obj-$(CONFIG_OCFS2_FS) += \ 3obj-$(CONFIG_OCFS2_FS) += \
6 ocfs2.o \ 4 ocfs2.o \
7 ocfs2_stackglue.o 5 ocfs2_stackglue.o
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index fe50ded1b4ce..498641eed2db 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -139,11 +139,16 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
139 139
140 lock_buffer(bh); 140 lock_buffer(bh);
141 if (buffer_jbd(bh)) { 141 if (buffer_jbd(bh)) {
142#ifdef CATCH_BH_JBD_RACES
142 mlog(ML_ERROR, 143 mlog(ML_ERROR,
143 "block %llu had the JBD bit set " 144 "block %llu had the JBD bit set "
144 "while I was in lock_buffer!", 145 "while I was in lock_buffer!",
145 (unsigned long long)bh->b_blocknr); 146 (unsigned long long)bh->b_blocknr);
146 BUG(); 147 BUG();
148#else
149 unlock_buffer(bh);
150 continue;
151#endif
147 } 152 }
148 153
149 clear_buffer_uptodate(bh); 154 clear_buffer_uptodate(bh);
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index 22f0253a3567..c2a6b0894022 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -405,12 +405,21 @@ static int ovl_create_or_link(struct dentry *dentry, int mode, dev_t rdev,
405 err = ovl_create_upper(dentry, inode, &stat, link, hardlink); 405 err = ovl_create_upper(dentry, inode, &stat, link, hardlink);
406 } else { 406 } else {
407 const struct cred *old_cred; 407 const struct cred *old_cred;
408 struct cred *override_cred;
408 409
409 old_cred = ovl_override_creds(dentry->d_sb); 410 old_cred = ovl_override_creds(dentry->d_sb);
410 411
411 err = ovl_create_over_whiteout(dentry, inode, &stat, link, 412 err = -ENOMEM;
412 hardlink); 413 override_cred = prepare_creds();
414 if (override_cred) {
415 override_cred->fsuid = old_cred->fsuid;
416 override_cred->fsgid = old_cred->fsgid;
417 put_cred(override_creds(override_cred));
418 put_cred(override_cred);
413 419
420 err = ovl_create_over_whiteout(dentry, inode, &stat,
421 link, hardlink);
422 }
414 revert_creds(old_cred); 423 revert_creds(old_cred);
415 } 424 }
416 425
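
ovl_override_creds() switches to the mounter's credentials so the whiteout handling is permitted, but left alone that would also make the new object owned by the mounter. The added code clones the now-current creds, copies the caller's fsuid/fsgid back in from old_cred, and installs the clone, so files are created with the caller's ownership under the mounter's privileges. The pattern, annotated (names as in the hunk):

	old_cred = ovl_override_creds(dentry->d_sb);	/* returns caller's creds */
	override_cred = prepare_creds();		/* clone of mounter's */
	if (override_cred) {
		/* create as the original caller, not as the mounter */
		override_cred->fsuid = old_cred->fsuid;
		override_cred->fsgid = old_cred->fsgid;
		put_cred(override_creds(override_cred));/* install the clone */
		put_cred(override_cred);		/* drop our reference */
		err = ovl_create_over_whiteout(dentry, inode, &stat,
					       link, hardlink);
	}
	revert_creds(old_cred);				/* back to caller */
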
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index 0ed7c4012437..1dbeab6cf96e 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -238,41 +238,27 @@ out:
238 return err; 238 return err;
239} 239}
240 240
241static bool ovl_need_xattr_filter(struct dentry *dentry,
242 enum ovl_path_type type)
243{
244 if ((type & (__OVL_PATH_PURE | __OVL_PATH_UPPER)) == __OVL_PATH_UPPER)
245 return S_ISDIR(dentry->d_inode->i_mode);
246 else
247 return false;
248}
249
250ssize_t ovl_getxattr(struct dentry *dentry, struct inode *inode, 241ssize_t ovl_getxattr(struct dentry *dentry, struct inode *inode,
251 const char *name, void *value, size_t size) 242 const char *name, void *value, size_t size)
252{ 243{
253 struct path realpath; 244 struct dentry *realdentry = ovl_dentry_real(dentry);
254 enum ovl_path_type type = ovl_path_real(dentry, &realpath);
255 245
256 if (ovl_need_xattr_filter(dentry, type) && ovl_is_private_xattr(name)) 246 if (ovl_is_private_xattr(name))
257 return -ENODATA; 247 return -ENODATA;
258 248
259 return vfs_getxattr(realpath.dentry, name, value, size); 249 return vfs_getxattr(realdentry, name, value, size);
260} 250}
261 251
262ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size) 252ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
263{ 253{
264 struct path realpath; 254 struct dentry *realdentry = ovl_dentry_real(dentry);
265 enum ovl_path_type type = ovl_path_real(dentry, &realpath);
266 ssize_t res; 255 ssize_t res;
267 int off; 256 int off;
268 257
269 res = vfs_listxattr(realpath.dentry, list, size); 258 res = vfs_listxattr(realdentry, list, size);
270 if (res <= 0 || size == 0) 259 if (res <= 0 || size == 0)
271 return res; 260 return res;
272 261
273 if (!ovl_need_xattr_filter(dentry, type))
274 return res;
275
276 /* filter out private xattrs */ 262 /* filter out private xattrs */
277 for (off = 0; off < res;) { 263 for (off = 0; off < res;) {
278 char *s = list + off; 264 char *s = list + off;
@@ -302,7 +288,7 @@ int ovl_removexattr(struct dentry *dentry, const char *name)
302 goto out; 288 goto out;
303 289
304 err = -ENODATA; 290 err = -ENODATA;
305 if (ovl_need_xattr_filter(dentry, type) && ovl_is_private_xattr(name)) 291 if (ovl_is_private_xattr(name))
306 goto out_drop_write; 292 goto out_drop_write;
307 293
308 if (!OVL_TYPE_UPPER(type)) { 294 if (!OVL_TYPE_UPPER(type)) {
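
Dropping ovl_need_xattr_filter() simplifies the rule: private overlay xattrs are hidden from getxattr()/listxattr() and rejected by removexattr() on every overlay inode, instead of only on the cases the old predicate tried to prove safe. The check itself is just a name-prefix test; roughly (a sketch, the real helper lives in fs/overlayfs and the prefix shown is the conventional one):

        static bool is_private_xattr(const char *name)
        {
                /* overlay bookkeeping attributes live under one prefix */
                return strncmp(name, "trusted.overlay.",
                               strlen("trusted.overlay.")) == 0;
        }
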
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index 8a4a266beff3..edc452c2a563 100644
--- a/fs/posix_acl.c
+++ b/fs/posix_acl.c
@@ -820,39 +820,43 @@ posix_acl_xattr_get(const struct xattr_handler *handler,
820 return error; 820 return error;
821} 821}
822 822
823static int 823int
824posix_acl_xattr_set(const struct xattr_handler *handler, 824set_posix_acl(struct inode *inode, int type, struct posix_acl *acl)
825 struct dentry *unused, struct inode *inode,
826 const char *name, const void *value,
827 size_t size, int flags)
828{ 825{
829 struct posix_acl *acl = NULL;
830 int ret;
831
832 if (!IS_POSIXACL(inode)) 826 if (!IS_POSIXACL(inode))
833 return -EOPNOTSUPP; 827 return -EOPNOTSUPP;
834 if (!inode->i_op->set_acl) 828 if (!inode->i_op->set_acl)
835 return -EOPNOTSUPP; 829 return -EOPNOTSUPP;
836 830
837 if (handler->flags == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode)) 831 if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
838 return value ? -EACCES : 0; 832 return acl ? -EACCES : 0;
839 if (!inode_owner_or_capable(inode)) 833 if (!inode_owner_or_capable(inode))
840 return -EPERM; 834 return -EPERM;
841 835
836 if (acl) {
837 int ret = posix_acl_valid(acl);
838 if (ret)
839 return ret;
840 }
841 return inode->i_op->set_acl(inode, acl, type);
842}
843EXPORT_SYMBOL(set_posix_acl);
844
845static int
846posix_acl_xattr_set(const struct xattr_handler *handler,
847 struct dentry *unused, struct inode *inode,
848 const char *name, const void *value,
849 size_t size, int flags)
850{
851 struct posix_acl *acl = NULL;
852 int ret;
853
842 if (value) { 854 if (value) {
843 acl = posix_acl_from_xattr(&init_user_ns, value, size); 855 acl = posix_acl_from_xattr(&init_user_ns, value, size);
844 if (IS_ERR(acl)) 856 if (IS_ERR(acl))
845 return PTR_ERR(acl); 857 return PTR_ERR(acl);
846
847 if (acl) {
848 ret = posix_acl_valid(acl);
849 if (ret)
850 goto out;
851 }
852 } 858 }
853 859 ret = set_posix_acl(inode, handler->flags, acl);
854 ret = inode->i_op->set_acl(inode, acl, handler->flags);
855out:
856 posix_acl_release(acl); 860 posix_acl_release(acl);
857 return ret; 861 return ret;
858} 862}
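
Factoring the body of posix_acl_xattr_set() into an exported set_posix_acl() gives in-kernel callers (e.g. knfsd) a validated way to set an ACL without going through the xattr layer. A hedged usage sketch; note that, as in the hunk above, set_posix_acl() does not consume the caller's reference:

        int apply_default_acl(struct inode *inode, struct posix_acl *acl)
        {
                int err = set_posix_acl(inode, ACL_TYPE_DEFAULT, acl);

                posix_acl_release(acl);   /* still ours to drop */
                return err;
        }
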
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 55bc7d6c8aac..06702783bf40 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -121,6 +121,13 @@ static struct dentry *proc_mount(struct file_system_type *fs_type,
121 if (IS_ERR(sb)) 121 if (IS_ERR(sb))
122 return ERR_CAST(sb); 122 return ERR_CAST(sb);
123 123
124 /*
125 * procfs isn't actually a stacking filesystem; however, there is
126 * too much magic going on inside it to permit stacking things on
127 * top of it
128 */
129 sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
130
124 if (!proc_parse_options(options, ns)) { 131 if (!proc_parse_options(options, ns)) {
125 deactivate_locked_super(sb); 132 deactivate_locked_super(sb);
126 return ERR_PTR(-EINVAL); 133 return ERR_PTR(-EINVAL);
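
Setting s_stack_depth to FILESYSTEM_MAX_STACK_DEPTH blocks stacking without a procfs special case anywhere else: a stacking filesystem computes its own depth as one more than the filesystem below it and bails out past the limit, so anything mounted over procfs fails that check. Roughly, modeled on the overlayfs/ecryptfs pattern:

        sb->s_stack_depth = lower_sb->s_stack_depth + 1;
        if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
                pr_err("maximum fs stacking depth exceeded\n");
                return -EINVAL;
        }
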
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index b8f2d1e8c645..c72c16c5a60f 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -1393,7 +1393,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
1393 unsigned long safe_mask = 0; 1393 unsigned long safe_mask = 0;
1394 unsigned int commit_max_age = (unsigned int)-1; 1394 unsigned int commit_max_age = (unsigned int)-1;
1395 struct reiserfs_journal *journal = SB_JOURNAL(s); 1395 struct reiserfs_journal *journal = SB_JOURNAL(s);
1396 char *new_opts = kstrdup(arg, GFP_KERNEL); 1396 char *new_opts;
1397 int err; 1397 int err;
1398 char *qf_names[REISERFS_MAXQUOTAS]; 1398 char *qf_names[REISERFS_MAXQUOTAS];
1399 unsigned int qfmt = 0; 1399 unsigned int qfmt = 0;
@@ -1401,6 +1401,10 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
1401 int i; 1401 int i;
1402#endif 1402#endif
1403 1403
1404 new_opts = kstrdup(arg, GFP_KERNEL);
1405 if (arg && !new_opts)
1406 return -ENOMEM;
1407
1404 sync_filesystem(s); 1408 sync_filesystem(s);
1405 reiserfs_write_lock(s); 1409 reiserfs_write_lock(s);
1406 1410
@@ -1546,7 +1550,8 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
1546 } 1550 }
1547 1551
1548out_ok_unlocked: 1552out_ok_unlocked:
1549 replace_mount_options(s, new_opts); 1553 if (new_opts)
1554 replace_mount_options(s, new_opts);
1550 return 0; 1555 return 0;
1551 1556
1552out_err_unlock: 1557out_err_unlock:
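
The reiserfs fix distinguishes the two ways kstrdup() can return NULL: kstrdup(NULL, ...) returns NULL by design, so a NULL result only means -ENOMEM when the source string was non-NULL. As a standalone idiom:

        char *copy = kstrdup(src, GFP_KERNEL);

        if (src && !copy)       /* a NULL src legitimately yields a NULL copy */
                return -ENOMEM;

The matching `if (new_opts)` guard before replace_mount_options() keeps the remount-with-no-options case working.
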
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 08316972ff93..7bbf420d1289 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -52,6 +52,7 @@
52#include "ubifs.h" 52#include "ubifs.h"
53#include <linux/mount.h> 53#include <linux/mount.h>
54#include <linux/slab.h> 54#include <linux/slab.h>
55#include <linux/migrate.h>
55 56
56static int read_block(struct inode *inode, void *addr, unsigned int block, 57static int read_block(struct inode *inode, void *addr, unsigned int block,
57 struct ubifs_data_node *dn) 58 struct ubifs_data_node *dn)
@@ -1452,6 +1453,26 @@ static int ubifs_set_page_dirty(struct page *page)
1452 return ret; 1453 return ret;
1453} 1454}
1454 1455
1456#ifdef CONFIG_MIGRATION
1457static int ubifs_migrate_page(struct address_space *mapping,
1458 struct page *newpage, struct page *page, enum migrate_mode mode)
1459{
1460 int rc;
1461
1462 rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
1463 if (rc != MIGRATEPAGE_SUCCESS)
1464 return rc;
1465
1466 if (PagePrivate(page)) {
1467 ClearPagePrivate(page);
1468 SetPagePrivate(newpage);
1469 }
1470
1471 migrate_page_copy(newpage, page);
1472 return MIGRATEPAGE_SUCCESS;
1473}
1474#endif
1475
1455static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags) 1476static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
1456{ 1477{
1457 /* 1478 /*
@@ -1591,6 +1612,9 @@ const struct address_space_operations ubifs_file_address_operations = {
1591 .write_end = ubifs_write_end, 1612 .write_end = ubifs_write_end,
1592 .invalidatepage = ubifs_invalidatepage, 1613 .invalidatepage = ubifs_invalidatepage,
1593 .set_page_dirty = ubifs_set_page_dirty, 1614 .set_page_dirty = ubifs_set_page_dirty,
1615#ifdef CONFIG_MIGRATION
1616 .migratepage = ubifs_migrate_page,
1617#endif
1594 .releasepage = ubifs_releasepage, 1618 .releasepage = ubifs_releasepage,
1595}; 1619};
1596 1620
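
UBIFS tags data pages with PagePrivate for its own dirty accounting, a meaning the generic page migration code cannot know about, so it supplies its own ->migratepage that does what the generic path does plus an explicit transfer of the PagePrivate flag. Filesystems with no page-private state can simply wire up the generic helper (sketch):

        static const struct address_space_operations plain_aops = {
        #ifdef CONFIG_MIGRATION
                .migratepage = migrate_page,  /* generic mapping move + page copy */
        #endif
        };
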
diff --git a/fs/udf/partition.c b/fs/udf/partition.c
index 5f861ed287c3..888c364b2fe9 100644
--- a/fs/udf/partition.c
+++ b/fs/udf/partition.c
@@ -295,7 +295,8 @@ static uint32_t udf_try_read_meta(struct inode *inode, uint32_t block,
295 map = &UDF_SB(sb)->s_partmaps[partition]; 295 map = &UDF_SB(sb)->s_partmaps[partition];
296 /* map to sparable/physical partition desc */ 296 /* map to sparable/physical partition desc */
297 phyblock = udf_get_pblock(sb, eloc.logicalBlockNum, 297 phyblock = udf_get_pblock(sb, eloc.logicalBlockNum,
298 map->s_partition_num, ext_offset + offset); 298 map->s_type_specific.s_metadata.s_phys_partition_ref,
299 ext_offset + offset);
299 } 300 }
300 301
301 brelse(epos.bh); 302 brelse(epos.bh);
@@ -317,14 +318,18 @@ uint32_t udf_get_pblock_meta25(struct super_block *sb, uint32_t block,
317 mdata = &map->s_type_specific.s_metadata; 318 mdata = &map->s_type_specific.s_metadata;
318 inode = mdata->s_metadata_fe ? : mdata->s_mirror_fe; 319 inode = mdata->s_metadata_fe ? : mdata->s_mirror_fe;
319 320
320 /* We shouldn't mount such media... */ 321 if (!inode)
321 BUG_ON(!inode); 322 return 0xFFFFFFFF;
323
322 retblk = udf_try_read_meta(inode, block, partition, offset); 324 retblk = udf_try_read_meta(inode, block, partition, offset);
323 if (retblk == 0xFFFFFFFF && mdata->s_metadata_fe) { 325 if (retblk == 0xFFFFFFFF && mdata->s_metadata_fe) {
324 udf_warn(sb, "error reading from METADATA, trying to read from MIRROR\n"); 326 udf_warn(sb, "error reading from METADATA, trying to read from MIRROR\n");
325 if (!(mdata->s_flags & MF_MIRROR_FE_LOADED)) { 327 if (!(mdata->s_flags & MF_MIRROR_FE_LOADED)) {
326 mdata->s_mirror_fe = udf_find_metadata_inode_efe(sb, 328 mdata->s_mirror_fe = udf_find_metadata_inode_efe(sb,
327 mdata->s_mirror_file_loc, map->s_partition_num); 329 mdata->s_mirror_file_loc,
330 mdata->s_phys_partition_ref);
331 if (IS_ERR(mdata->s_mirror_fe))
332 mdata->s_mirror_fe = NULL;
328 mdata->s_flags |= MF_MIRROR_FE_LOADED; 333 mdata->s_flags |= MF_MIRROR_FE_LOADED;
329 } 334 }
330 335
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 5e2c8c814e1b..4942549e7dc8 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -951,13 +951,13 @@ out2:
951} 951}
952 952
953struct inode *udf_find_metadata_inode_efe(struct super_block *sb, 953struct inode *udf_find_metadata_inode_efe(struct super_block *sb,
954 u32 meta_file_loc, u32 partition_num) 954 u32 meta_file_loc, u32 partition_ref)
955{ 955{
956 struct kernel_lb_addr addr; 956 struct kernel_lb_addr addr;
957 struct inode *metadata_fe; 957 struct inode *metadata_fe;
958 958
959 addr.logicalBlockNum = meta_file_loc; 959 addr.logicalBlockNum = meta_file_loc;
960 addr.partitionReferenceNum = partition_num; 960 addr.partitionReferenceNum = partition_ref;
961 961
962 metadata_fe = udf_iget_special(sb, &addr); 962 metadata_fe = udf_iget_special(sb, &addr);
963 963
@@ -974,7 +974,8 @@ struct inode *udf_find_metadata_inode_efe(struct super_block *sb,
974 return metadata_fe; 974 return metadata_fe;
975} 975}
976 976
977static int udf_load_metadata_files(struct super_block *sb, int partition) 977static int udf_load_metadata_files(struct super_block *sb, int partition,
978 int type1_index)
978{ 979{
979 struct udf_sb_info *sbi = UDF_SB(sb); 980 struct udf_sb_info *sbi = UDF_SB(sb);
980 struct udf_part_map *map; 981 struct udf_part_map *map;
@@ -984,20 +985,21 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
984 985
985 map = &sbi->s_partmaps[partition]; 986 map = &sbi->s_partmaps[partition];
986 mdata = &map->s_type_specific.s_metadata; 987 mdata = &map->s_type_specific.s_metadata;
988 mdata->s_phys_partition_ref = type1_index;
987 989
988 /* metadata address */ 990 /* metadata address */
989 udf_debug("Metadata file location: block = %d part = %d\n", 991 udf_debug("Metadata file location: block = %d part = %d\n",
990 mdata->s_meta_file_loc, map->s_partition_num); 992 mdata->s_meta_file_loc, mdata->s_phys_partition_ref);
991 993
992 fe = udf_find_metadata_inode_efe(sb, mdata->s_meta_file_loc, 994 fe = udf_find_metadata_inode_efe(sb, mdata->s_meta_file_loc,
993 map->s_partition_num); 995 mdata->s_phys_partition_ref);
994 if (IS_ERR(fe)) { 996 if (IS_ERR(fe)) {
995 /* mirror file entry */ 997 /* mirror file entry */
996 udf_debug("Mirror metadata file location: block = %d part = %d\n", 998 udf_debug("Mirror metadata file location: block = %d part = %d\n",
997 mdata->s_mirror_file_loc, map->s_partition_num); 999 mdata->s_mirror_file_loc, mdata->s_phys_partition_ref);
998 1000
999 fe = udf_find_metadata_inode_efe(sb, mdata->s_mirror_file_loc, 1001 fe = udf_find_metadata_inode_efe(sb, mdata->s_mirror_file_loc,
1000 map->s_partition_num); 1002 mdata->s_phys_partition_ref);
1001 1003
1002 if (IS_ERR(fe)) { 1004 if (IS_ERR(fe)) {
1003 udf_err(sb, "Both metadata and mirror metadata inode efe can not found\n"); 1005 udf_err(sb, "Both metadata and mirror metadata inode efe can not found\n");
@@ -1015,7 +1017,7 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
1015 */ 1017 */
1016 if (mdata->s_bitmap_file_loc != 0xFFFFFFFF) { 1018 if (mdata->s_bitmap_file_loc != 0xFFFFFFFF) {
1017 addr.logicalBlockNum = mdata->s_bitmap_file_loc; 1019 addr.logicalBlockNum = mdata->s_bitmap_file_loc;
1018 addr.partitionReferenceNum = map->s_partition_num; 1020 addr.partitionReferenceNum = mdata->s_phys_partition_ref;
1019 1021
1020 udf_debug("Bitmap file location: block = %d part = %d\n", 1022 udf_debug("Bitmap file location: block = %d part = %d\n",
1021 addr.logicalBlockNum, addr.partitionReferenceNum); 1023 addr.logicalBlockNum, addr.partitionReferenceNum);
@@ -1283,7 +1285,7 @@ static int udf_load_partdesc(struct super_block *sb, sector_t block)
1283 p = (struct partitionDesc *)bh->b_data; 1285 p = (struct partitionDesc *)bh->b_data;
1284 partitionNumber = le16_to_cpu(p->partitionNumber); 1286 partitionNumber = le16_to_cpu(p->partitionNumber);
1285 1287
1286 /* First scan for TYPE1, SPARABLE and METADATA partitions */ 1288 /* First scan for TYPE1 and SPARABLE partitions */
1287 for (i = 0; i < sbi->s_partitions; i++) { 1289 for (i = 0; i < sbi->s_partitions; i++) {
1288 map = &sbi->s_partmaps[i]; 1290 map = &sbi->s_partmaps[i];
1289 udf_debug("Searching map: (%d == %d)\n", 1291 udf_debug("Searching map: (%d == %d)\n",
@@ -1333,7 +1335,7 @@ static int udf_load_partdesc(struct super_block *sb, sector_t block)
1333 goto out_bh; 1335 goto out_bh;
1334 1336
1335 if (map->s_partition_type == UDF_METADATA_MAP25) { 1337 if (map->s_partition_type == UDF_METADATA_MAP25) {
1336 ret = udf_load_metadata_files(sb, i); 1338 ret = udf_load_metadata_files(sb, i, type1_idx);
1337 if (ret < 0) { 1339 if (ret < 0) {
1338 udf_err(sb, "error loading MetaData partition map %d\n", 1340 udf_err(sb, "error loading MetaData partition map %d\n",
1339 i); 1341 i);
diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
index 27b5335730c9..c13875d669c0 100644
--- a/fs/udf/udf_sb.h
+++ b/fs/udf/udf_sb.h
@@ -61,6 +61,11 @@ struct udf_meta_data {
61 __u32 s_bitmap_file_loc; 61 __u32 s_bitmap_file_loc;
62 __u32 s_alloc_unit_size; 62 __u32 s_alloc_unit_size;
63 __u16 s_align_unit_size; 63 __u16 s_align_unit_size;
64 /*
65 * Partition Reference Number of the associated physical / sparable
66 * partition
67 */
68 __u16 s_phys_partition_ref;
64 int s_flags; 69 int s_flags;
65 struct inode *s_metadata_fe; 70 struct inode *s_metadata_fe;
66 struct inode *s_mirror_fe; 71 struct inode *s_mirror_fe;
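
The common thread in the udf changes: block references found inside the metadata file (and its mirror and bitmap) are relative to the physical or sparable partition backing the metadata map, not to the metadata map's own partition number, so the map now records that backing reference once at load time and every lookup uses it. The address construction then looks like (sketch, field names as in the patch):

        struct kernel_lb_addr addr = {
                .logicalBlockNum       = mdata->s_meta_file_loc,
                .partitionReferenceNum = mdata->s_phys_partition_ref,
        };

Replacing BUG_ON(!inode) with a 0xFFFFFFFF return in udf_get_pblock_meta25() also downgrades a corrupt-media condition from a crash to an I/O error.
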
diff --git a/include/acpi/video.h b/include/acpi/video.h
index 70a41f742037..5731ccb42585 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -51,7 +51,8 @@ extern void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type);
51 */ 51 */
52extern bool acpi_video_handles_brightness_key_presses(void); 52extern bool acpi_video_handles_brightness_key_presses(void);
53extern int acpi_video_get_levels(struct acpi_device *device, 53extern int acpi_video_get_levels(struct acpi_device *device,
54 struct acpi_video_device_brightness **dev_br); 54 struct acpi_video_device_brightness **dev_br,
55 int *pmax_level);
55#else 56#else
56static inline int acpi_video_register(void) { return 0; } 57static inline int acpi_video_register(void) { return 0; }
57static inline void acpi_video_unregister(void) { return; } 58static inline void acpi_video_unregister(void) { return; }
@@ -72,7 +73,8 @@ static inline bool acpi_video_handles_brightness_key_presses(void)
72 return false; 73 return false;
73} 74}
74static inline int acpi_video_get_levels(struct acpi_device *device, 75static inline int acpi_video_get_levels(struct acpi_device *device,
75 struct acpi_video_device_brightness **dev_br) 76 struct acpi_video_device_brightness **dev_br,
77 int *pmax_level)
76{ 78{
77 return -ENODEV; 79 return -ENODEV;
78} 80}
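
acpi_video_get_levels() grows a third out-parameter so callers receive the maximum brightness level along with the level table instead of re-deriving it. A hedged caller sketch (the zero-on-success convention matches the -ENODEV stub above):

        struct acpi_video_device_brightness *br;
        int max_level;

        if (!acpi_video_get_levels(device, &br, &max_level)) {
                /* br points at the level table; max_level is the top level */
        }
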
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 6bd05700d8c9..05f05f17a7c2 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -22,37 +22,33 @@
22#include <asm-generic/qspinlock_types.h> 22#include <asm-generic/qspinlock_types.h>
23 23
24/** 24/**
25 * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock
26 * @lock : Pointer to queued spinlock structure
27 *
28 * There is a very slight possibility of live-lock if the lockers keep coming
29 * and the waiter is just unfortunate enough to not see any unlock state.
30 */
31#ifndef queued_spin_unlock_wait
32extern void queued_spin_unlock_wait(struct qspinlock *lock);
33#endif
34
35/**
25 * queued_spin_is_locked - is the spinlock locked? 36 * queued_spin_is_locked - is the spinlock locked?
26 * @lock: Pointer to queued spinlock structure 37 * @lock: Pointer to queued spinlock structure
27 * Return: 1 if it is locked, 0 otherwise 38 * Return: 1 if it is locked, 0 otherwise
28 */ 39 */
40#ifndef queued_spin_is_locked
29static __always_inline int queued_spin_is_locked(struct qspinlock *lock) 41static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
30{ 42{
31 /* 43 /*
32 * queued_spin_lock_slowpath() can ACQUIRE the lock before 44 * See queued_spin_unlock_wait().
33 * issuing the unordered store that sets _Q_LOCKED_VAL.
34 *
35 * See both smp_cond_acquire() sites for more detail.
36 *
37 * This however means that in code like:
38 *
39 * spin_lock(A) spin_lock(B)
40 * spin_unlock_wait(B) spin_is_locked(A)
41 * do_something() do_something()
42 *
43 * Both CPUs can end up running do_something() because the store
44 * setting _Q_LOCKED_VAL will pass through the loads in
45 * spin_unlock_wait() and/or spin_is_locked().
46 * 45 *
47 * Avoid this by issuing a full memory barrier between the spin_lock() 46 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
48 * and the loads in spin_unlock_wait() and spin_is_locked(). 47 * isn't immediately observable.
49 *
50 * Note that regular mutual exclusion doesn't care about this
51 * delayed store.
52 */ 48 */
53 smp_mb(); 49 return atomic_read(&lock->val);
54 return atomic_read(&lock->val) & _Q_LOCKED_MASK;
55} 50}
51#endif
56 52
57/** 53/**
58 * queued_spin_value_unlocked - is the spinlock structure unlocked? 54 * queued_spin_value_unlocked - is the spinlock structure unlocked?
@@ -122,21 +118,6 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
122} 118}
123#endif 119#endif
124 120
125/**
126 * queued_spin_unlock_wait - wait until current lock holder releases the lock
127 * @lock : Pointer to queued spinlock structure
128 *
129 * There is a very slight possibility of live-lock if the lockers keep coming
130 * and the waiter is just unfortunate enough to not see any unlock state.
131 */
132static inline void queued_spin_unlock_wait(struct qspinlock *lock)
133{
134 /* See queued_spin_is_locked() */
135 smp_mb();
136 while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
137 cpu_relax();
138}
139
140#ifndef virt_spin_lock 121#ifndef virt_spin_lock
141static __always_inline bool virt_spin_lock(struct qspinlock *lock) 122static __always_inline bool virt_spin_lock(struct qspinlock *lock)
142{ 123{
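
The qspinlock rework makes queued_spin_is_locked() a plain nonzero test (any set bit, pending or tail included, means a locker exists) and moves the ordering-sensitive queued_spin_unlock_wait() out of line, where it, not its callers, supplies the barriers. The hazard the removed comment described, restated as a schematic (do_something() is illustrative):

        /* CPU0 */                      /* CPU1 */
        spin_lock(&A);                  spin_lock(&B);
        spin_unlock_wait(&B);           if (!spin_is_locked(&A))
        do_something();                         do_something();

Each CPU intends to proceed only if the other is not inside its critical section; without a full barrier between the lock's store and the subsequent loads, both loads can miss the other CPU's store and both CPUs run do_something().
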
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 84f1a8eefbdb..cf918e3e6afb 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -52,10 +52,12 @@
52#include <linux/poll.h> 52#include <linux/poll.h>
53#include <linux/ratelimit.h> 53#include <linux/ratelimit.h>
54#include <linux/sched.h> 54#include <linux/sched.h>
55#include <linux/seqlock.h>
55#include <linux/slab.h> 56#include <linux/slab.h>
56#include <linux/types.h> 57#include <linux/types.h>
57#include <linux/vmalloc.h> 58#include <linux/vmalloc.h>
58#include <linux/workqueue.h> 59#include <linux/workqueue.h>
60#include <linux/fence.h>
59 61
60#include <asm/mman.h> 62#include <asm/mman.h>
61#include <asm/pgalloc.h> 63#include <asm/pgalloc.h>
@@ -66,6 +68,7 @@
66 68
67#include <drm/drm_agpsupport.h> 69#include <drm/drm_agpsupport.h>
68#include <drm/drm_crtc.h> 70#include <drm/drm_crtc.h>
71#include <drm/drm_fourcc.h>
69#include <drm/drm_global.h> 72#include <drm/drm_global.h>
70#include <drm/drm_hashtab.h> 73#include <drm/drm_hashtab.h>
71#include <drm/drm_mem_util.h> 74#include <drm/drm_mem_util.h>
@@ -83,6 +86,7 @@ struct drm_local_map;
83struct drm_device_dma; 86struct drm_device_dma;
84struct drm_dma_handle; 87struct drm_dma_handle;
85struct drm_gem_object; 88struct drm_gem_object;
89struct drm_master;
86 90
87struct device_node; 91struct device_node;
88struct videomode; 92struct videomode;
@@ -281,13 +285,14 @@ struct drm_ioctl_desc {
281 285
282/* Event queued up for userspace to read */ 286/* Event queued up for userspace to read */
283struct drm_pending_event { 287struct drm_pending_event {
288 struct completion *completion;
284 struct drm_event *event; 289 struct drm_event *event;
290 struct fence *fence;
285 struct list_head link; 291 struct list_head link;
286 struct list_head pending_link; 292 struct list_head pending_link;
287 struct drm_file *file_priv; 293 struct drm_file *file_priv;
288 pid_t pid; /* pid of requester, no guarantee it's valid by the time 294 pid_t pid; /* pid of requester, no guarantee it's valid by the time
289 we deliver the event, for tracing only */ 295 we deliver the event, for tracing only */
290 void (*destroy)(struct drm_pending_event *event);
291}; 296};
292 297
293/* initial implementation using a linked list - todo hashtab */ 298/* initial implementation using a linked list - todo hashtab */
@@ -299,8 +304,6 @@ struct drm_prime_file_private {
299/** File private data */ 304/** File private data */
300struct drm_file { 305struct drm_file {
301 unsigned authenticated :1; 306 unsigned authenticated :1;
302 /* Whether we're master for a minor. Protected by master_mutex */
303 unsigned is_master :1;
304 /* true when the client has asked us to expose stereo 3D mode flags */ 307 /* true when the client has asked us to expose stereo 3D mode flags */
305 unsigned stereo_allowed :1; 308 unsigned stereo_allowed :1;
306 /* 309 /*
@@ -311,10 +314,10 @@ struct drm_file {
311 /* true if client understands atomic properties */ 314 /* true if client understands atomic properties */
312 unsigned atomic:1; 315 unsigned atomic:1;
313 /* 316 /*
314 * This client is allowed to gain master privileges for @master. 317 * This client is the creator of @master.
315 * Protected by struct drm_device::master_mutex. 318 * Protected by struct drm_device::master_mutex.
316 */ 319 */
317 unsigned allowed_master:1; 320 unsigned is_master:1;
318 321
319 struct pid *pid; 322 struct pid *pid;
320 kuid_t uid; 323 kuid_t uid;
@@ -332,7 +335,7 @@ struct drm_file {
332 void *driver_priv; 335 void *driver_priv;
333 336
334 struct drm_master *master; /* master this node is currently associated with 337 struct drm_master *master; /* master this node is currently associated with
335 N.B. not always minor->master */ 338 N.B. not always dev->master */
336 /** 339 /**
337 * fbs - List of framebuffers associated with this file. 340 * fbs - List of framebuffers associated with this file.
338 * 341 *
@@ -371,32 +374,6 @@ struct drm_lock_data {
371 int idle_has_lock; 374 int idle_has_lock;
372}; 375};
373 376
374/**
375 * struct drm_master - drm master structure
376 *
377 * @refcount: Refcount for this master object.
378 * @minor: Link back to minor char device we are master for. Immutable.
379 * @unique: Unique identifier: e.g. busid. Protected by drm_global_mutex.
380 * @unique_len: Length of unique field. Protected by drm_global_mutex.
381 * @magic_map: Map of used authentication tokens. Protected by struct_mutex.
382 * @lock: DRI lock information.
383 * @driver_priv: Pointer to driver-private information.
384 */
385struct drm_master {
386 struct kref refcount;
387 struct drm_minor *minor;
388 char *unique;
389 int unique_len;
390 struct idr magic_map;
391 struct drm_lock_data lock;
392 void *driver_priv;
393};
394
395/* Size of ringbuffer for vblank timestamps. Just double-buffer
396 * in initial implementation.
397 */
398#define DRM_VBLANKTIME_RBSIZE 2
399
400/* Flags and return codes for get_vblank_timestamp() driver function. */ 377/* Flags and return codes for get_vblank_timestamp() driver function. */
401#define DRM_CALLED_FROM_VBLIRQ 1 378#define DRM_CALLED_FROM_VBLIRQ 1
402#define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0) 379#define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0)
@@ -420,8 +397,6 @@ struct drm_driver {
420 void (*postclose) (struct drm_device *, struct drm_file *); 397 void (*postclose) (struct drm_device *, struct drm_file *);
421 void (*lastclose) (struct drm_device *); 398 void (*lastclose) (struct drm_device *);
422 int (*unload) (struct drm_device *); 399 int (*unload) (struct drm_device *);
423 int (*suspend) (struct drm_device *, pm_message_t state);
424 int (*resume) (struct drm_device *);
425 int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv); 400 int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
426 int (*dma_quiescent) (struct drm_device *); 401 int (*dma_quiescent) (struct drm_device *);
427 int (*context_dtor) (struct drm_device *dev, int context); 402 int (*context_dtor) (struct drm_device *dev, int context);
@@ -434,7 +409,7 @@ struct drm_driver {
434 * 409 *
435 * Driver callback for fetching a raw hardware vblank counter for @crtc. 410 * Driver callback for fetching a raw hardware vblank counter for @crtc.
436 * If a device doesn't have a hardware counter, the driver can simply 411 * If a device doesn't have a hardware counter, the driver can simply
437 * return the value of drm_vblank_count. The DRM core will account for 412 * use drm_vblank_no_hw_counter() function. The DRM core will account for
438 * missed vblank events while interrupts were disabled based on system 413 * missed vblank events while interrupts were disabled based on system
439 * timestamps. 414 * timestamps.
440 * 415 *
@@ -452,8 +427,8 @@ struct drm_driver {
452 * @pipe: which irq to enable 427 * @pipe: which irq to enable
453 * 428 *
454 * Enable vblank interrupts for @crtc. If the device doesn't have 429 * Enable vblank interrupts for @crtc. If the device doesn't have
455 * a hardware vblank counter, this routine should be a no-op, since 430 * a hardware vblank counter, the driver should use the
456 * interrupts will have to stay on to keep the count accurate. 431 * drm_vblank_no_hw_counter() function that keeps a virtual counter.
457 * 432 *
458 * RETURNS 433 * RETURNS
459 * Zero on success, appropriate errno if the given @crtc's vblank 434 * Zero on success, appropriate errno if the given @crtc's vblank
@@ -467,8 +442,8 @@ struct drm_driver {
467 * @pipe: which irq to enable 442 * @pipe: which irq to enable
468 * 443 *
469 * Disable vblank interrupts for @crtc. If the device doesn't have 444 * Disable vblank interrupts for @crtc. If the device doesn't have
470 * a hardware vblank counter, this routine should be a no-op, since 445 * a hardware vblank counter, the driver should use the
471 * interrupts will have to stay on to keep the count accurate. 446 * drm_vblank_no_hw_counter() function that keeps a virtual counter.
472 */ 447 */
473 void (*disable_vblank) (struct drm_device *dev, unsigned int pipe); 448 void (*disable_vblank) (struct drm_device *dev, unsigned int pipe);
474 449
@@ -573,8 +548,7 @@ struct drm_driver {
573 548
574 int (*master_set)(struct drm_device *dev, struct drm_file *file_priv, 549 int (*master_set)(struct drm_device *dev, struct drm_file *file_priv,
575 bool from_open); 550 bool from_open);
576 void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv, 551 void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv);
577 bool from_release);
578 552
579 int (*debugfs_init)(struct drm_minor *minor); 553 int (*debugfs_init)(struct drm_minor *minor);
580 void (*debugfs_cleanup)(struct drm_minor *minor); 554 void (*debugfs_cleanup)(struct drm_minor *minor);
@@ -708,9 +682,6 @@ struct drm_minor {
708 682
709 struct list_head debugfs_list; 683 struct list_head debugfs_list;
710 struct mutex debugfs_lock; /* Protects debugfs_list. */ 684 struct mutex debugfs_lock; /* Protects debugfs_list. */
711
712 /* currently active master for this node. Protected by master_mutex */
713 struct drm_master *master;
714}; 685};
715 686
716 687
@@ -725,10 +696,10 @@ struct drm_vblank_crtc {
725 wait_queue_head_t queue; /**< VBLANK wait queue */ 696 wait_queue_head_t queue; /**< VBLANK wait queue */
726 struct timer_list disable_timer; /* delayed disable timer */ 697 struct timer_list disable_timer; /* delayed disable timer */
727 698
728 /* vblank counter, protected by dev->vblank_time_lock for writes */ 699 seqlock_t seqlock; /* protects vblank count and time */
729 u32 count; 700
730 /* vblank timestamps, protected by dev->vblank_time_lock for writes */ 701 u32 count; /* vblank counter */
731 struct timeval time[DRM_VBLANKTIME_RBSIZE]; 702 struct timeval time; /* vblank timestamp */
732 703
733 atomic_t refcount; /* number of users of vblank interrupts per crtc */ 704 atomic_t refcount; /* number of users of vblank interrupts per crtc */
734 u32 last; /* protected by dev->vbl_lock, used */ 705 u32 last; /* protected by dev->vbl_lock, used */
@@ -759,6 +730,10 @@ struct drm_device {
759 struct drm_minor *control; /**< Control node */ 730 struct drm_minor *control; /**< Control node */
760 struct drm_minor *primary; /**< Primary node */ 731 struct drm_minor *primary; /**< Primary node */
761 struct drm_minor *render; /**< Render node */ 732 struct drm_minor *render; /**< Render node */
733
734 /* currently active master for this device. Protected by master_mutex */
735 struct drm_master *master;
736
762 atomic_t unplugged; /**< Flag whether dev is dead */ 737 atomic_t unplugged; /**< Flag whether dev is dead */
763 struct inode *anon_inode; /**< inode for private address-space */ 738 struct inode *anon_inode; /**< inode for private address-space */
764 char *unique; /**< unique name of the device */ 739 char *unique; /**< unique name of the device */
@@ -928,7 +903,6 @@ int drm_open(struct inode *inode, struct file *filp);
928ssize_t drm_read(struct file *filp, char __user *buffer, 903ssize_t drm_read(struct file *filp, char __user *buffer,
929 size_t count, loff_t *offset); 904 size_t count, loff_t *offset);
930int drm_release(struct inode *inode, struct file *filp); 905int drm_release(struct inode *inode, struct file *filp);
931int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv);
932unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); 906unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
933int drm_event_reserve_init_locked(struct drm_device *dev, 907int drm_event_reserve_init_locked(struct drm_device *dev,
934 struct drm_file *file_priv, 908 struct drm_file *file_priv,
@@ -972,18 +946,12 @@ extern u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
972 struct timeval *vblanktime); 946 struct timeval *vblanktime);
973extern u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc, 947extern u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
974 struct timeval *vblanktime); 948 struct timeval *vblanktime);
975extern void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe,
976 struct drm_pending_vblank_event *e);
977extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc, 949extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
978 struct drm_pending_vblank_event *e); 950 struct drm_pending_vblank_event *e);
979extern void drm_arm_vblank_event(struct drm_device *dev, unsigned int pipe,
980 struct drm_pending_vblank_event *e);
981extern void drm_crtc_arm_vblank_event(struct drm_crtc *crtc, 951extern void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
982 struct drm_pending_vblank_event *e); 952 struct drm_pending_vblank_event *e);
983extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe); 953extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe);
984extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc); 954extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
985extern int drm_vblank_get(struct drm_device *dev, unsigned int pipe);
986extern void drm_vblank_put(struct drm_device *dev, unsigned int pipe);
987extern int drm_crtc_vblank_get(struct drm_crtc *crtc); 955extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
988extern void drm_crtc_vblank_put(struct drm_crtc *crtc); 956extern void drm_crtc_vblank_put(struct drm_crtc *crtc);
989extern void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe); 957extern void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe);
@@ -994,6 +962,7 @@ extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
994extern void drm_crtc_vblank_reset(struct drm_crtc *crtc); 962extern void drm_crtc_vblank_reset(struct drm_crtc *crtc);
995extern void drm_crtc_vblank_on(struct drm_crtc *crtc); 963extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
996extern void drm_vblank_cleanup(struct drm_device *dev); 964extern void drm_vblank_cleanup(struct drm_device *dev);
965extern u32 drm_accurate_vblank_count(struct drm_crtc *crtc);
997extern u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe); 966extern u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe);
998 967
999extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, 968extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
@@ -1020,14 +989,10 @@ static inline wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc
1020extern void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe); 989extern void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe);
1021extern void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe); 990extern void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe);
1022 991
1023 /* Stub support (drm_stub.h) */ 992/* drm_drv.c */
1024extern struct drm_master *drm_master_get(struct drm_master *master); 993void drm_put_dev(struct drm_device *dev);
1025extern void drm_master_put(struct drm_master **master); 994void drm_unplug_dev(struct drm_device *dev);
1026
1027extern void drm_put_dev(struct drm_device *dev);
1028extern void drm_unplug_dev(struct drm_device *dev);
1029extern unsigned int drm_debug; 995extern unsigned int drm_debug;
1030extern bool drm_atomic;
1031 996
1032 /* Debugfs support */ 997 /* Debugfs support */
1033#if defined(CONFIG_DEBUG_FS) 998#if defined(CONFIG_DEBUG_FS)
@@ -1078,11 +1043,13 @@ extern void drm_sysfs_hotplug_event(struct drm_device *dev);
1078 1043
1079struct drm_device *drm_dev_alloc(struct drm_driver *driver, 1044struct drm_device *drm_dev_alloc(struct drm_driver *driver,
1080 struct device *parent); 1045 struct device *parent);
1046int drm_dev_init(struct drm_device *dev,
1047 struct drm_driver *driver,
1048 struct device *parent);
1081void drm_dev_ref(struct drm_device *dev); 1049void drm_dev_ref(struct drm_device *dev);
1082void drm_dev_unref(struct drm_device *dev); 1050void drm_dev_unref(struct drm_device *dev);
1083int drm_dev_register(struct drm_device *dev, unsigned long flags); 1051int drm_dev_register(struct drm_device *dev, unsigned long flags);
1084void drm_dev_unregister(struct drm_device *dev); 1052void drm_dev_unregister(struct drm_device *dev);
1085int drm_dev_set_unique(struct drm_device *dev, const char *name);
1086 1053
1087struct drm_minor *drm_minor_acquire(unsigned int minor_id); 1054struct drm_minor *drm_minor_acquire(unsigned int minor_id);
1088void drm_minor_release(struct drm_minor *minor); 1055void drm_minor_release(struct drm_minor *minor);
@@ -1135,7 +1102,6 @@ extern int drm_pcie_get_max_link_width(struct drm_device *dev, u32 *mlw);
1135 1102
1136/* platform section */ 1103/* platform section */
1137extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device); 1104extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device);
1138extern int drm_platform_set_busid(struct drm_device *d, struct drm_master *m);
1139 1105
1140/* returns true if currently okay to sleep */ 1106/* returns true if currently okay to sleep */
1141static __inline__ bool drm_can_sleep(void) 1107static __inline__ bool drm_can_sleep(void)
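
Three structural drmP.h changes ride along here: struct drm_master moves off the minor and becomes per-device (dev->master, with the struct itself relocated to the new drm_auth.h), the unused suspend/resume driver hooks go away, and the per-CRTC vblank timestamp ring buffer is replaced by a single count/time pair guarded by a seqlock. The read side of that last one follows the usual seqlock pattern (sketch, vblank being a struct drm_vblank_crtc pointer):

        unsigned int seq;
        u32 count;
        struct timeval time;

        do {
                seq   = read_seqbegin(&vblank->seqlock);
                count = vblank->count;
                time  = vblank->time;
        } while (read_seqretry(&vblank->seqlock, seq));

The writer (the vblank interrupt) updates both fields under write_seqlock(), so readers always see a consistent counter/timestamp pair without taking a lock.
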
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 92c84e9ab09a..856a9c85a838 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -30,6 +30,12 @@
30 30
31#include <drm/drm_crtc.h> 31#include <drm/drm_crtc.h>
32 32
33void drm_crtc_commit_put(struct drm_crtc_commit *commit);
34static inline void drm_crtc_commit_get(struct drm_crtc_commit *commit)
35{
36 kref_get(&commit->ref);
37}
38
33struct drm_atomic_state * __must_check 39struct drm_atomic_state * __must_check
34drm_atomic_state_alloc(struct drm_device *dev); 40drm_atomic_state_alloc(struct drm_device *dev);
35void drm_atomic_state_clear(struct drm_atomic_state *state); 41void drm_atomic_state_clear(struct drm_atomic_state *state);
@@ -71,7 +77,7 @@ static inline struct drm_crtc_state *
71drm_atomic_get_existing_crtc_state(struct drm_atomic_state *state, 77drm_atomic_get_existing_crtc_state(struct drm_atomic_state *state,
72 struct drm_crtc *crtc) 78 struct drm_crtc *crtc)
73{ 79{
74 return state->crtc_states[drm_crtc_index(crtc)]; 80 return state->crtcs[drm_crtc_index(crtc)].state;
75} 81}
76 82
77/** 83/**
@@ -86,7 +92,7 @@ static inline struct drm_plane_state *
86drm_atomic_get_existing_plane_state(struct drm_atomic_state *state, 92drm_atomic_get_existing_plane_state(struct drm_atomic_state *state,
87 struct drm_plane *plane) 93 struct drm_plane *plane)
88{ 94{
89 return state->plane_states[drm_plane_index(plane)]; 95 return state->planes[drm_plane_index(plane)].state;
90} 96}
91 97
92/** 98/**
@@ -106,7 +112,43 @@ drm_atomic_get_existing_connector_state(struct drm_atomic_state *state,
106 if (index >= state->num_connector) 112 if (index >= state->num_connector)
107 return NULL; 113 return NULL;
108 114
109 return state->connector_states[index]; 115 return state->connectors[index].state;
116}
117
118/**
119 * __drm_atomic_get_current_plane_state - get current plane state
120 * @state: global atomic state object
121 * @plane: plane to grab
122 *
123 * This function returns the plane state for the given plane, either from
124 * @state, or if the plane isn't part of the atomic state update, from @plane.
125 * This is useful in atomic check callbacks, when drivers need to peek at, but
126 * not change, state of other planes, since it avoids threading an error code
127 * back up the call chain.
128 *
129 * WARNING:
130 *
131 * Note that this function is in general unsafe since it doesn't check for the
132 * required locking for access state structures. Drivers must ensure that it is
133 * safe to access the returned state structure through other means. One common
134 * example is when planes are fixed to a single CRTC, and the driver knows that
135 * the CRTC lock is held already. In that case holding the CRTC lock gives a
136 * read-lock on all planes connected to that CRTC. But if planes can be
137 * reassigned things get more tricky. In that case it's better to use
138 * drm_atomic_get_plane_state and wire up full error handling.
139 *
140 * Returns:
141 *
142 * Read-only pointer to the current plane state.
143 */
144static inline const struct drm_plane_state *
145__drm_atomic_get_current_plane_state(struct drm_atomic_state *state,
146 struct drm_plane *plane)
147{
148 if (state->planes[drm_plane_index(plane)].state)
149 return state->planes[drm_plane_index(plane)].state;
150
151 return plane->state;
110} 152}
111 153
112int __must_check 154int __must_check
@@ -139,29 +181,39 @@ int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
139int __must_check drm_atomic_commit(struct drm_atomic_state *state); 181int __must_check drm_atomic_commit(struct drm_atomic_state *state);
140int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state); 182int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state);
141 183
142#define for_each_connector_in_state(state, connector, connector_state, __i) \ 184#define for_each_connector_in_state(__state, connector, connector_state, __i) \
143 for ((__i) = 0; \ 185 for ((__i) = 0; \
144 (__i) < (state)->num_connector && \ 186 (__i) < (__state)->num_connector && \
145 ((connector) = (state)->connectors[__i], \ 187 ((connector) = (__state)->connectors[__i].ptr, \
146 (connector_state) = (state)->connector_states[__i], 1); \ 188 (connector_state) = (__state)->connectors[__i].state, 1); \
147 (__i)++) \ 189 (__i)++) \
148 for_each_if (connector) 190 for_each_if (connector)
149 191
150#define for_each_crtc_in_state(state, crtc, crtc_state, __i) \ 192#define for_each_crtc_in_state(__state, crtc, crtc_state, __i) \
151 for ((__i) = 0; \ 193 for ((__i) = 0; \
152 (__i) < (state)->dev->mode_config.num_crtc && \ 194 (__i) < (__state)->dev->mode_config.num_crtc && \
153 ((crtc) = (state)->crtcs[__i], \ 195 ((crtc) = (__state)->crtcs[__i].ptr, \
154 (crtc_state) = (state)->crtc_states[__i], 1); \ 196 (crtc_state) = (__state)->crtcs[__i].state, 1); \
155 (__i)++) \ 197 (__i)++) \
156 for_each_if (crtc_state) 198 for_each_if (crtc_state)
157 199
158#define for_each_plane_in_state(state, plane, plane_state, __i) \ 200#define for_each_plane_in_state(__state, plane, plane_state, __i) \
159 for ((__i) = 0; \ 201 for ((__i) = 0; \
160 (__i) < (state)->dev->mode_config.num_total_plane && \ 202 (__i) < (__state)->dev->mode_config.num_total_plane && \
161 ((plane) = (state)->planes[__i], \ 203 ((plane) = (__state)->planes[__i].ptr, \
162 (plane_state) = (state)->plane_states[__i], 1); \ 204 (plane_state) = (__state)->planes[__i].state, 1); \
163 (__i)++) \ 205 (__i)++) \
164 for_each_if (plane_state) 206 for_each_if (plane_state)
207
208/**
209 * drm_atomic_crtc_needs_modeset - compute combined modeset need
210 * @state: &drm_crtc_state for the CRTC
211 *
212 * To give drivers flexibility struct &drm_crtc_state has 3 booleans to track
213 * whether the state CRTC changed enough to need a full modeset cycle:
214 * connectors_changed, mode_changed and active_change. This helper simply
215 * combines these three to compute the overall need for a modeset for @state.
216 */
165static inline bool 217static inline bool
166drm_atomic_crtc_needs_modeset(struct drm_crtc_state *state) 218drm_atomic_crtc_needs_modeset(struct drm_crtc_state *state)
167{ 219{
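
drm_atomic.h switches from parallel object/state arrays to combined per-object slots (state->planes[i].ptr and .state, likewise for crtcs and connectors), which is why the iterator macros and the get_existing_*_state() helpers are rewritten in lockstep. Driver-side usage keeps the same shape (sketch):

        struct drm_plane *plane;
        struct drm_plane_state *plane_state;
        int i;

        for_each_plane_in_state(state, plane, plane_state, i) {
                /* plane == state->planes[i].ptr,
                 * plane_state == state->planes[i].state */
        }

The new __drm_atomic_get_current_plane_state() then falls back to plane->state for planes not part of the update, subject to the locking caveats spelled out in its comment.
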
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index d473dcc91f54..d86ae5dcd7b4 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -38,6 +38,7 @@ int drm_atomic_helper_check_planes(struct drm_device *dev,
38 struct drm_atomic_state *state); 38 struct drm_atomic_state *state);
39int drm_atomic_helper_check(struct drm_device *dev, 39int drm_atomic_helper_check(struct drm_device *dev,
40 struct drm_atomic_state *state); 40 struct drm_atomic_state *state);
41void drm_atomic_helper_commit_tail(struct drm_atomic_state *state);
41int drm_atomic_helper_commit(struct drm_device *dev, 42int drm_atomic_helper_commit(struct drm_device *dev,
42 struct drm_atomic_state *state, 43 struct drm_atomic_state *state,
43 bool nonblock); 44 bool nonblock);
@@ -71,8 +72,15 @@ void drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_sta
71void drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc *crtc, 72void drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc *crtc,
72 bool atomic); 73 bool atomic);
73 74
74void drm_atomic_helper_swap_state(struct drm_device *dev, 75void drm_atomic_helper_swap_state(struct drm_atomic_state *state,
75 struct drm_atomic_state *state); 76 bool stall);
77
78/* nonblocking commit helpers */
79int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
80 bool nonblock);
81void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state);
82void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state);
83void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state);
76 84
77/* implementations for legacy interfaces */ 85/* implementations for legacy interfaces */
78int drm_atomic_helper_update_plane(struct drm_plane *plane, 86int drm_atomic_helper_update_plane(struct drm_plane *plane,
@@ -147,9 +155,9 @@ void
147__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state); 155__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state);
148void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector, 156void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
149 struct drm_connector_state *state); 157 struct drm_connector_state *state);
150void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, 158int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
151 u16 *red, u16 *green, u16 *blue, 159 u16 *red, u16 *green, u16 *blue,
152 uint32_t start, uint32_t size); 160 uint32_t size);
153 161
154/** 162/**
155 * drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC 163 * drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC
@@ -159,7 +167,7 @@ void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
159 * This iterates over the current state, useful (for example) when applying 167 * This iterates over the current state, useful (for example) when applying
160 * atomic state after it has been checked and swapped. To iterate over the 168 * atomic state after it has been checked and swapped. To iterate over the
161 * planes which *will* be attached (for ->atomic_check()) see 169 * planes which *will* be attached (for ->atomic_check()) see
162 * drm_crtc_for_each_pending_plane() 170 * drm_atomic_crtc_state_for_each_plane().
163 */ 171 */
164#define drm_atomic_crtc_for_each_plane(plane, crtc) \ 172#define drm_atomic_crtc_for_each_plane(plane, crtc) \
165 drm_for_each_plane_mask(plane, (crtc)->dev, (crtc)->state->plane_mask) 173 drm_for_each_plane_mask(plane, (crtc)->dev, (crtc)->state->plane_mask)
@@ -171,11 +179,31 @@ void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
171 * 179 *
172 * Similar to drm_crtc_for_each_plane(), but iterates the planes that will be 180 * Similar to drm_crtc_for_each_plane(), but iterates the planes that will be
173 * attached if the specified state is applied. Useful during (for example) 181 * attached if the specified state is applied. Useful during (for example)
174 * ->atomic_check() operations, to validate the incoming state 182 * ->atomic_check() operations, to validate the incoming state.
175 */ 183 */
176#define drm_atomic_crtc_state_for_each_plane(plane, crtc_state) \ 184#define drm_atomic_crtc_state_for_each_plane(plane, crtc_state) \
177 drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask) 185 drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask)
178 186
187/**
188 * drm_atomic_crtc_state_for_each_plane_state - iterate over attached planes in new state
189 * @plane: the loop cursor
190 * @plane_state: loop cursor for the plane's state, must be const
191 * @crtc_state: the incoming crtc-state
192 *
193 * Similar to drm_crtc_for_each_plane(), but iterates the planes that will be
194 * attached if the specified state is applied. Useful during (for example)
195 * ->atomic_check() operations, to validate the incoming state.
196 *
197 * Compared to just drm_atomic_crtc_state_for_each_plane() this also fills in a
198 * const plane_state. This is useful when a driver just wants to peek at other
199 * active planes on this crtc, but does not need to change it.
200 */
201#define drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) \
202 drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask) \
203 for_each_if ((plane_state = \
204 __drm_atomic_get_current_plane_state((crtc_state)->state, \
205 plane)))
206
179/* 207/*
180 * drm_atomic_plane_disabling - check whether a plane is being disabled 208 * drm_atomic_plane_disabling - check whether a plane is being disabled
181 * @plane: plane object 209 * @plane: plane object
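
The new nonblocking-commit helpers split a commit into explicit phases: setup_commit() during commit setup, wait_for_dependencies() before touching hardware, commit_hw_done() once the hardware state is programmed, and commit_cleanup_done() when cleanup finishes. The default drm_atomic_helper_commit_tail() follows roughly this shape (a sketch; wait_for_dependencies() and commit_cleanup_done() bracket it from the surrounding commit code, and the bool to commit_planes() is active_only):

        static void example_commit_tail(struct drm_atomic_state *state)
        {
                struct drm_device *dev = state->dev;

                drm_atomic_helper_commit_modeset_disables(dev, state);
                drm_atomic_helper_commit_planes(dev, state, false);
                drm_atomic_helper_commit_modeset_enables(dev, state);

                drm_atomic_helper_commit_hw_done(state);  /* hardware is committed */

                drm_atomic_helper_wait_for_vblanks(dev, state);
                drm_atomic_helper_cleanup_planes(dev, state);
        }
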
diff --git a/include/drm/drm_auth.h b/include/drm/drm_auth.h
new file mode 100644
index 000000000000..610223b0481b
--- /dev/null
+++ b/include/drm/drm_auth.h
@@ -0,0 +1,59 @@
1/*
2 * Internal Header for the Direct Rendering Manager
3 *
4 * Copyright 2016 Intel Corporation
5 *
6 * Author: Daniel Vetter <daniel.vetter@ffwll.ch>
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 */
27
28#ifndef _DRM_AUTH_H_
29#define _DRM_AUTH_H_
30
31/**
32 * struct drm_master - drm master structure
33 *
34 * @refcount: Refcount for this master object.
35 * @dev: Link back to the DRM device
36 * @unique: Unique identifier: e.g. busid. Protected by drm_global_mutex.
37 * @unique_len: Length of unique field. Protected by drm_global_mutex.
38 * @magic_map: Map of used authentication tokens. Protected by struct_mutex.
39 * @lock: DRI lock information.
40 * @driver_priv: Pointer to driver-private information.
41 *
42 * Note that master structures are only relevant for the legacy/primary device
43 * nodes, hence there can only be one per device, not one per drm_minor.
44 */
45struct drm_master {
46 struct kref refcount;
47 struct drm_device *dev;
48 char *unique;
49 int unique_len;
50 struct idr magic_map;
51 struct drm_lock_data lock;
52 void *driver_priv;
53};
54
55struct drm_master *drm_master_get(struct drm_master *master);
56void drm_master_put(struct drm_master **master);
57bool drm_is_current_master(struct drm_file *fpriv);
58
59#endif
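
With struct drm_master hanging off the device instead of the minor, master-only paths stop poking file_priv->is_master directly and ask drm_is_current_master(), which compares against dev->master under the proper locking. A typical check in an ioctl (sketch, names illustrative):

        static int my_priv_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
        {
                if (!drm_is_current_master(file_priv))
                        return -EACCES;

                /* ... master-only work ... */
                return 0;
        }
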
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index d1559cd04e3d..f5469d3a46dd 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -44,6 +44,7 @@ struct drm_file;
44struct drm_clip_rect; 44struct drm_clip_rect;
45struct device_node; 45struct device_node;
46struct fence; 46struct fence;
47struct edid;
47 48
48struct drm_mode_object { 49struct drm_mode_object {
49 uint32_t id; 50 uint32_t id;
@@ -253,6 +254,8 @@ struct drm_framebuffer {
253 int bits_per_pixel; 254 int bits_per_pixel;
254 int flags; 255 int flags;
255 uint32_t pixel_format; /* fourcc format */ 256 uint32_t pixel_format; /* fourcc format */
257 int hot_x;
258 int hot_y;
256 struct list_head filp_head; 259 struct list_head filp_head;
257}; 260};
258 261
@@ -314,6 +317,7 @@ struct drm_plane_helper_funcs;
314 * update to ensure framebuffer cleanup isn't done too early 317 * update to ensure framebuffer cleanup isn't done too early
315 * @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings 318 * @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings
316 * @mode: current mode timings 319 * @mode: current mode timings
320 * @mode_blob: &drm_property_blob for @mode
317 * @degamma_lut: Lookup table for converting framebuffer pixel data 321 * @degamma_lut: Lookup table for converting framebuffer pixel data
318 * before apply the conversion matrix 322 * before apply the conversion matrix
319 * @ctm: Transformation matrix 323 * @ctm: Transformation matrix
@@ -478,8 +482,8 @@ struct drm_crtc_funcs {
478 * going on, which should eventually be unified to just one set of 482 * going on, which should eventually be unified to just one set of
479 * hooks. 483 * hooks.
480 */ 484 */
481 void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, 485 int (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
482 uint32_t start, uint32_t size); 486 uint32_t size);
483 487
484 /** 488 /**
485 * @destroy: 489 * @destroy:
@@ -701,6 +705,32 @@ struct drm_crtc_funcs {
701 const struct drm_crtc_state *state, 705 const struct drm_crtc_state *state,
702 struct drm_property *property, 706 struct drm_property *property,
703 uint64_t *val); 707 uint64_t *val);
708
709 /**
710 * @late_register:
711 *
712 * This optional hook can be used to register additional userspace
713 * interfaces attached to the crtc like debugfs interfaces.
714 * It is called late in the driver load sequence from drm_dev_register().
715 * Everything added from this callback should be unregistered in
716 * the early_unregister callback.
717 *
718 * Returns:
719 *
720 * 0 on success, or a negative error code on failure.
721 */
722 int (*late_register)(struct drm_crtc *crtc);
723
724 /**
725 * @early_unregister:
726 *
727 * This optional hook should be used to unregister the additional
728 * userspace interfaces attached to the crtc from
729 * @late_register. It is called from drm_dev_unregister(),
 730 * early in the driver unload sequence to disable userspace access
 731 * before data structures are torn down.
732 */
733 void (*early_unregister)(struct drm_crtc *crtc);
704}; 734};
705 735
706/** 736/**
@@ -708,6 +738,7 @@ struct drm_crtc_funcs {
708 * @dev: parent DRM device 738 * @dev: parent DRM device
709 * @port: OF node used by drm_of_find_possible_crtcs() 739 * @port: OF node used by drm_of_find_possible_crtcs()
710 * @head: list management 740 * @head: list management
741 * @name: human readable name, can be overwritten by the driver
711 * @mutex: per-CRTC locking 742 * @mutex: per-CRTC locking
712 * @base: base KMS object for ID tracking etc. 743 * @base: base KMS object for ID tracking etc.
713 * @primary: primary plane for this CRTC 744 * @primary: primary plane for this CRTC
@@ -724,9 +755,6 @@ struct drm_crtc_funcs {
724 * @gamma_store: gamma ramp values 755 * @gamma_store: gamma ramp values
725 * @helper_private: mid-layer private data 756 * @helper_private: mid-layer private data
726 * @properties: property tracking for this CRTC 757 * @properties: property tracking for this CRTC
727 * @state: current atomic state for this CRTC
728 * @acquire_ctx: per-CRTC implicit acquire context used by atomic drivers for
729 * legacy IOCTLs
730 * 758 *
731 * Each CRTC may have one or more connectors associated with it. This structure 759 * Each CRTC may have one or more connectors associated with it. This structure
732 * allows the CRTC to be controlled. 760 * allows the CRTC to be controlled.
@@ -738,12 +766,13 @@ struct drm_crtc {
738 766
739 char *name; 767 char *name;
740 768
741 /* 769 /**
742 * crtc mutex 770 * @mutex:
743 * 771 *
744 * This provides a read lock for the overall crtc state (mode, dpms 772 * This provides a read lock for the overall crtc state (mode, dpms
745 * state, ...) and a write lock for everything which can be updated 773 * state, ...) and a write lock for everything which can be updated
746 * without a full modeset (fb, cursor data, ...) 774 * without a full modeset (fb, cursor data, crtc properties ...). Full
775 * modeset also needs to grab dev->mode_config.connection_mutex.
747 */ 776 */
748 struct drm_modeset_lock mutex; 777 struct drm_modeset_lock mutex;
749 778
@@ -753,6 +782,9 @@ struct drm_crtc {
753 struct drm_plane *primary; 782 struct drm_plane *primary;
754 struct drm_plane *cursor; 783 struct drm_plane *cursor;
755 784
785 /* position inside the mode_config.list, can be used as a [] idx */
786 unsigned index;
787
756 /* position of cursor plane on crtc */ 788 /* position of cursor plane on crtc */
757 int cursor_x; 789 int cursor_x;
758 int cursor_y; 790 int cursor_y;
@@ -779,11 +811,37 @@ struct drm_crtc {
779 811
780 struct drm_object_properties properties; 812 struct drm_object_properties properties;
781 813
814 /**
815 * @state:
816 *
817 * Current atomic state for this CRTC.
818 */
782 struct drm_crtc_state *state; 819 struct drm_crtc_state *state;
783 820
784 /* 821 /**
785 * For legacy crtc IOCTLs so that atomic drivers can get at the locking 822 * @commit_list:
786 * acquire context. 823 *
824 * List of &drm_crtc_commit structures tracking pending commits.
825 * Protected by @commit_lock. This list doesn't hold its own full
826 * reference, but borrows it from the ongoing commit. Commit entries
 827 * must be removed from this list once the commit is fully completed,
 828 * but before its corresponding &drm_atomic_state gets destroyed.
829 */
830 struct list_head commit_list;
831
832 /**
833 * @commit_lock:
834 *
835 * Spinlock to protect @commit_list.
836 */
837 spinlock_t commit_lock;
838
839 /**
840 * @acquire_ctx:
841 *
842 * Per-CRTC implicit acquire context used by atomic drivers for legacy
843 * IOCTLs, so that atomic drivers can get at the locking acquire
844 * context.
787 */ 845 */
788 struct drm_modeset_acquire_ctx *acquire_ctx; 846 struct drm_modeset_acquire_ctx *acquire_ctx;
789}; 847};
@@ -926,6 +984,33 @@ struct drm_connector_funcs {
926 uint64_t val); 984 uint64_t val);
927 985
928 /** 986 /**
987 * @late_register:
988 *
989 * This optional hook can be used to register additional userspace
990 * interfaces attached to the connector, like backlight control, i2c,
991 * DP aux or similar interfaces. It is called late in the driver load
992 * sequence from drm_connector_register() when registering all the
993 * core drm connector interfaces. Everything added from this callback
994 * should be unregistered in the early_unregister callback.
995 *
996 * Returns:
997 *
998 * 0 on success, or a negative error code on failure.
999 */
1000 int (*late_register)(struct drm_connector *connector);
1001
1002 /**
1003 * @early_unregister:
1004 *
1005 * This optional hook should be used to unregister the additional
1006 * userspace interfaces attached to the connector from
1007 * late_register(). It is called from drm_connector_unregister(),
1008 * early in the driver unload sequence to disable userspace access
1009 * before data structures are torn down.
1010 */
1011 void (*early_unregister)(struct drm_connector *connector);
1012
1013 /**
929 * @destroy: 1014 * @destroy:
930 * 1015 *
931 * Clean up connector resources. This is called at driver unload time 1016 * Clean up connector resources. This is called at driver unload time
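A minimal sketch of how a driver might implement the new connector @late_register/@early_unregister hooks; the foo_connector type, to_foo_connector() and the embedded DDC i2c adapter are illustrative assumptions, not part of this patch:

        static int foo_connector_late_register(struct drm_connector *connector)
        {
                struct foo_connector *foo = to_foo_connector(connector);

                /* Expose the DDC i2c bus only once the connector is
                 * visible to userspace (hypothetical driver). */
                return i2c_add_adapter(&foo->ddc);
        }

        static void foo_connector_early_unregister(struct drm_connector *connector)
        {
                struct foo_connector *foo = to_foo_connector(connector);

                i2c_del_adapter(&foo->ddc);
        }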
@@ -1069,6 +1154,32 @@ struct drm_encoder_funcs {
1069 * hotplugged in DRM. 1154 * hotplugged in DRM.
1070 */ 1155 */
1071 void (*destroy)(struct drm_encoder *encoder); 1156 void (*destroy)(struct drm_encoder *encoder);
1157
1158 /**
1159 * @late_register:
1160 *
1161 * This optional hook can be used to register additional userspace
1162 * interfaces attached to the encoder like debugfs interfaces.
1163 * It is called late in the driver load sequence from drm_dev_register().
1164 * Everything added from this callback should be unregistered in
1165 * the early_unregister callback.
1166 *
1167 * Returns:
1168 *
1169 * 0 on success, or a negative error code on failure.
1170 */
1171 int (*late_register)(struct drm_encoder *encoder);
1172
1173 /**
1174 * @early_unregister:
1175 *
1176 * This optional hook should be used to unregister the additional
1177 * userspace interfaces attached to the encoder from
1178 * late_register(). It is called from drm_dev_unregister(),
1179 * early in the driver unload sequence to disable userspace access
1180 * before data structures are torn down.
1181 */
1182 void (*early_unregister)(struct drm_encoder *encoder);
1072}; 1183};
1073 1184
1074#define DRM_CONNECTOR_MAX_ENCODER 3 1185#define DRM_CONNECTOR_MAX_ENCODER 3
@@ -1078,7 +1189,7 @@ struct drm_encoder_funcs {
1078 * @dev: parent DRM device 1189 * @dev: parent DRM device
1079 * @head: list management 1190 * @head: list management
1080 * @base: base KMS object 1191 * @base: base KMS object
1081 * @name: encoder name 1192 * @name: human readable name, can be overwritten by the driver
1082 * @encoder_type: one of the %DRM_MODE_ENCODER_<foo> types in drm_mode.h 1193 * @encoder_type: one of the %DRM_MODE_ENCODER_<foo> types in drm_mode.h
1083 * @possible_crtcs: bitmask of potential CRTC bindings 1194 * @possible_crtcs: bitmask of potential CRTC bindings
1084 * @possible_clones: bitmask of potential sibling encoders for cloning 1195 * @possible_clones: bitmask of potential sibling encoders for cloning
@@ -1097,6 +1208,10 @@ struct drm_encoder {
1097 struct drm_mode_object base; 1208 struct drm_mode_object base;
1098 char *name; 1209 char *name;
1099 int encoder_type; 1210 int encoder_type;
1211
1212 /* position inside the mode_config.list, can be used as a [] idx */
1213 unsigned index;
1214
1100 uint32_t possible_crtcs; 1215 uint32_t possible_crtcs;
1101 uint32_t possible_clones; 1216 uint32_t possible_clones;
1102 1217
@@ -1124,12 +1239,14 @@ struct drm_encoder {
1124 * @attr: sysfs attributes 1239 * @attr: sysfs attributes
1125 * @head: list management 1240 * @head: list management
1126 * @base: base KMS object 1241 * @base: base KMS object
1127 * @name: connector name 1242 * @name: human readable name, can be overwritten by the driver
1243 * @connector_id: compacted connector id, useful for indexing arrays
1128 * @connector_type: one of the %DRM_MODE_CONNECTOR_<foo> types from drm_mode.h 1244 * @connector_type: one of the %DRM_MODE_CONNECTOR_<foo> types from drm_mode.h
1129 * @connector_type_id: index into connector type enum 1245 * @connector_type_id: index into connector type enum
1130 * @interlace_allowed: can this connector handle interlaced modes? 1246 * @interlace_allowed: can this connector handle interlaced modes?
1131 * @doublescan_allowed: can this connector handle doublescan? 1247 * @doublescan_allowed: can this connector handle doublescan?
1132 * @stereo_allowed: can this connector handle stereo modes? 1248 * @stereo_allowed: can this connector handle stereo modes?
1249 * @registered: is this connector exposed (registered) with userspace?
1133 * @modes: modes available on this connector (from fill_modes() + user) 1250 * @modes: modes available on this connector (from fill_modes() + user)
1134 * @status: one of the drm_connector_status enums (connected, not, or unknown) 1251 * @status: one of the drm_connector_status enums (connected, not, or unknown)
1135 * @probed_modes: list of modes derived directly from the display 1252 * @probed_modes: list of modes derived directly from the display
@@ -1137,7 +1254,6 @@ struct drm_encoder {
1137 * @funcs: connector control functions 1254 * @funcs: connector control functions
1138 * @edid_blob_ptr: DRM property containing EDID if present 1255 * @edid_blob_ptr: DRM property containing EDID if present
1139 * @properties: property tracking for this connector 1256 * @properties: property tracking for this connector
1140 * @path_blob_ptr: DRM blob property data for the DP MST path property
1141 * @polled: a %DRM_CONNECTOR_POLL_<foo> value for core driven polling 1257 * @polled: a %DRM_CONNECTOR_POLL_<foo> value for core driven polling
1142 * @dpms: current dpms state 1258 * @dpms: current dpms state
1143 * @helper_private: mid-layer private data 1259 * @helper_private: mid-layer private data
@@ -1187,6 +1303,7 @@ struct drm_connector {
1187 bool interlace_allowed; 1303 bool interlace_allowed;
1188 bool doublescan_allowed; 1304 bool doublescan_allowed;
1189 bool stereo_allowed; 1305 bool stereo_allowed;
1306 bool registered;
1190 struct list_head modes; /* list of modes on this connector */ 1307 struct list_head modes; /* list of modes on this connector */
1191 1308
1192 enum drm_connector_status status; 1309 enum drm_connector_status status;
@@ -1200,8 +1317,23 @@ struct drm_connector {
1200 struct drm_property_blob *edid_blob_ptr; 1317 struct drm_property_blob *edid_blob_ptr;
1201 struct drm_object_properties properties; 1318 struct drm_object_properties properties;
1202 1319
1320 /**
1321 * @path_blob_ptr:
1322 *
1323 * DRM blob property data for the DP MST path property.
1324 */
1203 struct drm_property_blob *path_blob_ptr; 1325 struct drm_property_blob *path_blob_ptr;
1204 1326
1327 /**
1328 * @tile_blob_ptr:
1329 *
1330 * DRM blob property data for the tile property (used mostly by DP MST).
1331 * This is meant for screens which are driven through separate display
1332 * pipelines represented by &drm_crtc, which might not be running with
1333 * genlocked clocks. For tiled panels which are genlocked, like
1334 * dual-link LVDS or dual-link DSI, the driver should try not to expose
1335 * the tiling and virtualize both &drm_crtc and &drm_plane if needed.
1336 */
1205 struct drm_property_blob *tile_blob_ptr; 1337 struct drm_property_blob *tile_blob_ptr;
1206 1338
1207 uint8_t polled; /* DRM_CONNECTOR_POLL_* */ 1339 uint8_t polled; /* DRM_CONNECTOR_POLL_* */
@@ -1263,6 +1395,7 @@ struct drm_connector {
1263 * plane (in 16.16) 1395 * plane (in 16.16)
1264 * @src_w: width of visible portion of plane (in 16.16) 1396 * @src_w: width of visible portion of plane (in 16.16)
1265 * @src_h: height of visible portion of plane (in 16.16) 1397 * @src_h: height of visible portion of plane (in 16.16)
1398 * @rotation: rotation of the plane
1266 * @state: backpointer to global drm_atomic_state 1399 * @state: backpointer to global drm_atomic_state
1267 */ 1400 */
1268struct drm_plane_state { 1401struct drm_plane_state {
@@ -1490,6 +1623,31 @@ struct drm_plane_funcs {
1490 const struct drm_plane_state *state, 1623 const struct drm_plane_state *state,
1491 struct drm_property *property, 1624 struct drm_property *property,
1492 uint64_t *val); 1625 uint64_t *val);
1626 /**
1627 * @late_register:
1628 *
1629 * This optional hook can be used to register additional userspace
1630 * interfaces attached to the plane like debugfs interfaces.
1631 * It is called late in the driver load sequence from drm_dev_register().
1632 * Everything added from this callback should be unregistered in
1633 * the early_unregister callback.
1634 *
1635 * Returns:
1636 *
1637 * 0 on success, or a negative error code on failure.
1638 */
1639 int (*late_register)(struct drm_plane *plane);
1640
1641 /**
1642 * @early_unregister:
1643 *
1644 * This optional hook should be used to unregister the additional
1645 * userspace interfaces attached to the plane from
1646 * late_register(). It is called from drm_dev_unregister(),
1647 * early in the driver unload sequence to disable userspace access
1648 * before data structures are torn down.
1649 */
1650 void (*early_unregister)(struct drm_plane *plane);
1493}; 1651};
1494 1652
1495enum drm_plane_type { 1653enum drm_plane_type {
@@ -1503,6 +1661,7 @@ enum drm_plane_type {
1503 * struct drm_plane - central DRM plane control structure 1661 * struct drm_plane - central DRM plane control structure
1504 * @dev: DRM device this plane belongs to 1662 * @dev: DRM device this plane belongs to
1505 * @head: for list management 1663 * @head: for list management
1664 * @name: human readable name, can be overwritten by the driver
1506 * @base: base mode object 1665 * @base: base mode object
1507 * @possible_crtcs: pipes this plane can be bound to 1666 * @possible_crtcs: pipes this plane can be bound to
1508 * @format_types: array of formats supported by this plane 1667 * @format_types: array of formats supported by this plane
@@ -1516,6 +1675,7 @@ enum drm_plane_type {
1516 * @properties: property tracking for this plane 1675 * @properties: property tracking for this plane
1517 * @type: type of plane (overlay, primary, cursor) 1676 * @type: type of plane (overlay, primary, cursor)
1518 * @state: current atomic state for this plane 1677 * @state: current atomic state for this plane
1678 * @helper_private: mid-layer private data
1519 */ 1679 */
1520struct drm_plane { 1680struct drm_plane {
1521 struct drm_device *dev; 1681 struct drm_device *dev;
@@ -1523,6 +1683,13 @@ struct drm_plane {
1523 1683
1524 char *name; 1684 char *name;
1525 1685
1686 /**
1687 * @mutex:
1688 *
1689 * Protects modeset plane state, together with the mutex of &drm_crtc
1690 * this plane is linked to (when active, getting activated or getting
1691 * disabled).
1692 */
1526 struct drm_modeset_lock mutex; 1693 struct drm_modeset_lock mutex;
1527 1694
1528 struct drm_mode_object base; 1695 struct drm_mode_object base;
@@ -1543,6 +1710,9 @@ struct drm_plane {
1543 1710
1544 enum drm_plane_type type; 1711 enum drm_plane_type type;
1545 1712
1713 /* position inside the mode_config.list, can be used as a [] idx */
1714 unsigned index;
1715
1546 const struct drm_plane_helper_funcs *helper_private; 1716 const struct drm_plane_helper_funcs *helper_private;
1547 1717
1548 struct drm_plane_state *state; 1718 struct drm_plane_state *state;
@@ -1694,18 +1864,136 @@ struct drm_bridge {
1694}; 1864};
1695 1865
1696/** 1866/**
1867 * struct drm_crtc_commit - track modeset commits on a CRTC
1868 *
1869 * This structure is used to track pending modeset changes and atomic commit on
1870 * a per-CRTC basis. Since updating the list should never block, this structure
1871 * is reference counted to allow waiters to safely wait on an event to complete,
1872 * without holding any locks.
1873 *
1874 * It has 3 different events in total to allow a fine-grained synchronization
1875 * between outstanding updates::
1876 *
1877 * atomic commit thread hardware
1878 *
1879 * write new state into hardware ----> ...
1880 * signal hw_done
1881 * switch to new state on next
1882 * ... v/hblank
1883 *
1884 * wait for buffers to show up ...
1885 *
1886 * ... send completion irq
1887 * irq handler signals flip_done
1888 * cleanup old buffers
1889 *
1890 * signal cleanup_done
1891 *
1892 * wait for flip_done <----
1893 * clean up atomic state
1894 *
1895 * The important bit to know is that cleanup_done is the terminal event, but the
1896 * ordering between flip_done and hw_done is entirely up to the specific driver
1897 * and modeset state change.
1898 *
1899 * For an example of how to use this, look at
1900 * drm_atomic_helper_setup_commit() from the atomic helper library.
1901 */
1902struct drm_crtc_commit {
1903 /**
1904 * @crtc:
1905 *
1906 * DRM CRTC for this commit.
1907 */
1908 struct drm_crtc *crtc;
1909
1910 /**
1911 * @ref:
1912 *
1913 * Reference count for this structure. Needed to allow blocking on
1914 * completions without the risk of the completion disappearing
1915 * meanwhile.
1916 */
1917 struct kref ref;
1918
1919 /**
1920 * @flip_done:
1921 *
1922 * Will be signaled when the hardware has flipped to the new set of
1923 * buffers. Signals at the same time as when the drm event for this
1924 * commit is sent to userspace, or when an out-fence is signalled. Note
1925 * that for most hardware, in most cases this happens after @hw_done is
1926 * signalled.
1927 */
1928 struct completion flip_done;
1929
1930 /**
1931 * @hw_done:
1932 *
1933 * Will be signalled when all hw register changes for this commit have
1934 * been written out. Especially when disabling a pipe this can be much
1935 * later than @flip_done, since that can signal already when the
1936 * screen goes black, whereas to fully shut down a pipe more register
1937 * I/O is required.
1938 *
1939 * Note that this does not need to include separately reference-counted
1940 * resources like backing storage buffer pinning, or runtime pm
1941 * management.
1942 */
1943 struct completion hw_done;
1944
1945 /**
1946 * @cleanup_done:
1947 *
1948 * Will be signalled after old buffers have been cleaned up by calling
1949 * drm_atomic_helper_cleanup_planes(). Since this can only happen after
1950 * a vblank wait has completed, it might be a bit later. This completion is
1951 * useful to throttle updates and avoid hardware updates getting ahead
1952 * of the buffer cleanup too much.
1953 */
1954 struct completion cleanup_done;
1955
1956 /**
1957 * @commit_entry:
1958 *
1959 * Entry on the per-CRTC commit_list. Protected by crtc->commit_lock.
1960 */
1961 struct list_head commit_entry;
1962
1963 /**
1964 * @event:
1965 *
1966 * &drm_pending_vblank_event pointer to clean up private events.
1967 */
1968 struct drm_pending_vblank_event *event;
1969};
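A hedged sketch of the synchronization pattern this enables: grab a reference to a pending commit under the CRTC's commit_lock, then block on @flip_done without holding any locks. The release_crtc_commit() kref release function is an assumption standing in for whatever release helper the driver or helper library provides:

        struct drm_crtc_commit *commit = NULL;

        spin_lock(&crtc->commit_lock);
        if (!list_empty(&crtc->commit_list)) {
                commit = list_first_entry(&crtc->commit_list,
                                          struct drm_crtc_commit, commit_entry);
                kref_get(&commit->ref);
        }
        spin_unlock(&crtc->commit_lock);

        if (commit) {
                wait_for_completion(&commit->flip_done);
                kref_put(&commit->ref, release_crtc_commit);
        }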
1970
1971struct __drm_planes_state {
1972 struct drm_plane *ptr;
1973 struct drm_plane_state *state;
1974};
1975
1976struct __drm_crtcs_state {
1977 struct drm_crtc *ptr;
1978 struct drm_crtc_state *state;
1979 struct drm_crtc_commit *commit;
1980};
1981
1982struct __drm_connectors_state {
1983 struct drm_connector *ptr;
1984 struct drm_connector_state *state;
1985};
1986
1987/**
1697 * struct drm_atomic_state - the global state object for atomic updates 1988 * struct drm_atomic_state - the global state object for atomic updates
1698 * @dev: parent DRM device 1989 * @dev: parent DRM device
1699 * @allow_modeset: allow full modeset 1990 * @allow_modeset: allow full modeset
1700 * @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics 1991 * @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
1701 * @legacy_set_config: Disable conflicting encoders instead of failing with -EINVAL. 1992 * @legacy_set_config: Disable conflicting encoders instead of failing with -EINVAL.
1702 * @planes: pointer to array of plane pointers 1993 * @planes: pointer to array of structures with per-plane data
1703 * @plane_states: pointer to array of plane states pointers
1704 * @crtcs: pointer to array of CRTC pointers 1994 * @crtcs: pointer to array of CRTC pointers
1705 * @crtc_states: pointer to array of CRTC states pointers
1706 * @num_connector: size of the @connectors and @connector_states arrays 1995 * @num_connector: size of the @connectors and @connector_states arrays
1707 * @connectors: pointer to array of connector pointers 1996 * @connectors: pointer to array of structures with per-connector data
1708 * @connector_states: pointer to array of connector states pointers
1709 * @acquire_ctx: acquire context for this atomic modeset state update 1997 * @acquire_ctx: acquire context for this atomic modeset state update
1710 */ 1998 */
1711struct drm_atomic_state { 1999struct drm_atomic_state {
@@ -1713,15 +2001,20 @@ struct drm_atomic_state {
1713 bool allow_modeset : 1; 2001 bool allow_modeset : 1;
1714 bool legacy_cursor_update : 1; 2002 bool legacy_cursor_update : 1;
1715 bool legacy_set_config : 1; 2003 bool legacy_set_config : 1;
1716 struct drm_plane **planes; 2004 struct __drm_planes_state *planes;
1717 struct drm_plane_state **plane_states; 2005 struct __drm_crtcs_state *crtcs;
1718 struct drm_crtc **crtcs;
1719 struct drm_crtc_state **crtc_states;
1720 int num_connector; 2006 int num_connector;
1721 struct drm_connector **connectors; 2007 struct __drm_connectors_state *connectors;
1722 struct drm_connector_state **connector_states;
1723 2008
1724 struct drm_modeset_acquire_ctx *acquire_ctx; 2009 struct drm_modeset_acquire_ctx *acquire_ctx;
2010
2011 /**
2012 * @commit_work:
2013 *
2014 * Work item which can be used by the driver or helpers to execute the
2015 * commit without blocking.
2016 */
2017 struct work_struct commit_work;
1725}; 2018};
1726 2019
1727 2020
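The parallel pointer arrays are folded into arrays of the __drm_*_state wrapper structures above; drivers normally never index them directly but use the existing iterator macros, which hide the layout change. A sketch:

        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
        int i;

        for_each_crtc_in_state(state, crtc, crtc_state, i) {
                /* state->crtcs[i].ptr == crtc and
                 * state->crtcs[i].state == crtc_state at this point. */
        }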
@@ -2022,8 +2315,6 @@ struct drm_mode_config_funcs {
2022 * @connection_mutex: ww mutex protecting connector state and routing 2315 * @connection_mutex: ww mutex protecting connector state and routing
2023 * @acquire_ctx: global implicit acquire context used by atomic drivers for 2316 * @acquire_ctx: global implicit acquire context used by atomic drivers for
2024 * legacy IOCTLs 2317 * legacy IOCTLs
2025 * @idr_mutex: mutex for KMS ID allocation and management
2026 * @crtc_idr: main KMS ID tracking object
2027 * @fb_lock: mutex to protect fb state and lists 2318 * @fb_lock: mutex to protect fb state and lists
2028 * @num_fb: number of fbs available 2319 * @num_fb: number of fbs available
2029 * @fb_list: list of framebuffers available 2320 * @fb_list: list of framebuffers available
@@ -2045,6 +2336,7 @@ struct drm_mode_config_funcs {
2045 * @fb_base: base address of the framebuffer 2336 * @fb_base: base address of the framebuffer
2046 * @poll_enabled: track polling support for this device 2337 * @poll_enabled: track polling support for this device
2047 * @poll_running: track polling status for this device 2338 * @poll_running: track polling status for this device
2339 * @delayed_event: track delayed poll uevent deliver for this device
2048 * @output_poll_work: delayed work for polling in process context 2340 * @output_poll_work: delayed work for polling in process context
2049 * @property_blob_list: list of all the blob property objects 2341 * @property_blob_list: list of all the blob property objects
2050 * @blob_lock: mutex for blob property allocation and management 2342 * @blob_lock: mutex for blob property allocation and management
@@ -2063,6 +2355,7 @@ struct drm_mode_config_funcs {
2063 * @async_page_flip: does this device support async flips on the primary plane? 2355 * @async_page_flip: does this device support async flips on the primary plane?
2064 * @cursor_width: hint to userspace for max cursor width 2356 * @cursor_width: hint to userspace for max cursor width
2065 * @cursor_height: hint to userspace for max cursor height 2357 * @cursor_height: hint to userspace for max cursor height
2358 * @helper_private: mid-layer private data
2066 * 2359 *
2067 * Core mode resource tracking structure. All CRTC, encoders, and connectors 2360 * Core mode resource tracking structure. All CRTC, encoders, and connectors
2068 * enumerated by the driver are added here, as are global properties. Some 2361 * enumerated by the driver are added here, as are global properties. Some
@@ -2072,10 +2365,30 @@ struct drm_mode_config {
2072 struct mutex mutex; /* protects configuration (mode lists etc.) */ 2365 struct mutex mutex; /* protects configuration (mode lists etc.) */
2073 struct drm_modeset_lock connection_mutex; /* protects connector->encoder and encoder->crtc links */ 2366 struct drm_modeset_lock connection_mutex; /* protects connector->encoder and encoder->crtc links */
2074 struct drm_modeset_acquire_ctx *acquire_ctx; /* for legacy _lock_all() / _unlock_all() */ 2367 struct drm_modeset_acquire_ctx *acquire_ctx; /* for legacy _lock_all() / _unlock_all() */
2075 struct mutex idr_mutex; /* for IDR management */ 2368
2076 struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */ 2369 /**
2077 struct idr tile_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */ 2370 * @idr_mutex:
2078 /* this is limited to one for now */ 2371 *
2372 * Mutex for KMS ID allocation and management. Protects both @crtc_idr
2373 * and @tile_idr.
2374 */
2375 struct mutex idr_mutex;
2376
2377 /**
2378 * @crtc_idr:
2379 *
2380 * Main KMS ID tracking object. Use this idr for all IDs, fb, crtc,
2381 * connector, modes - just makes life easier to have only one.
2382 */
2383 struct idr crtc_idr;
2384
2385 /**
2386 * @tile_idr:
2387 *
2388 * Use this idr for allocating new IDs for tiled sinks, like those used in some
2389 * high-res DP MST screens.
2390 */
2391 struct idr tile_idr;
2079 2392
2080 struct mutex fb_lock; /* protects global and per-file fb lists */ 2393 struct mutex fb_lock; /* protects global and per-file fb lists */
2081 int num_fb; 2394 int num_fb;
@@ -2177,11 +2490,17 @@ struct drm_mode_config {
2177 /* whether async page flip is supported or not */ 2490 /* whether async page flip is supported or not */
2178 bool async_page_flip; 2491 bool async_page_flip;
2179 2492
2180 /* whether the driver supports fb modifiers */ 2493 /**
2494 * @allow_fb_modifiers:
2495 *
2496 * Whether the driver supports fb modifiers in the ADDFB2.1 ioctl call.
2497 */
2181 bool allow_fb_modifiers; 2498 bool allow_fb_modifiers;
2182 2499
2183 /* cursor size */ 2500 /* cursor size */
2184 uint32_t cursor_width, cursor_height; 2501 uint32_t cursor_width, cursor_height;
2502
2503 struct drm_mode_config_helper_funcs *helper_private;
2185}; 2504};
2186 2505
2187/** 2506/**
@@ -2230,7 +2549,18 @@ int drm_crtc_init_with_planes(struct drm_device *dev,
2230 const struct drm_crtc_funcs *funcs, 2549 const struct drm_crtc_funcs *funcs,
2231 const char *name, ...); 2550 const char *name, ...);
2232extern void drm_crtc_cleanup(struct drm_crtc *crtc); 2551extern void drm_crtc_cleanup(struct drm_crtc *crtc);
2233extern unsigned int drm_crtc_index(struct drm_crtc *crtc); 2552
2553/**
2554 * drm_crtc_index - find the index of a registered CRTC
2555 * @crtc: CRTC to find index for
2556 *
2557 * Given a registered CRTC, return the index of that CRTC within a DRM
2558 * device's list of CRTCs.
2559 */
2560static inline unsigned int drm_crtc_index(struct drm_crtc *crtc)
2561{
2562 return crtc->index;
2563}
2234 2564
2235/** 2565/**
2236 * drm_crtc_mask - find the mask of a registered CRTC 2566 * drm_crtc_mask - find the mask of a registered CRTC
@@ -2244,12 +2574,10 @@ static inline uint32_t drm_crtc_mask(struct drm_crtc *crtc)
2244 return 1 << drm_crtc_index(crtc); 2574 return 1 << drm_crtc_index(crtc);
2245} 2575}
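Since drm_crtc_index() now just reads the cached index, drm_crtc_mask() is cheap as well; a sketch of the common pattern of building an encoder's possible_crtcs bitmask (the foo_crtcs array is illustrative):

        uint32_t possible_crtcs = 0;
        unsigned int i;

        for (i = 0; i < num_foo_crtcs; i++)
                possible_crtcs |= drm_crtc_mask(foo_crtcs[i]);

        encoder->possible_crtcs = possible_crtcs;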
2246 2576
2247extern void drm_connector_ida_init(void); 2577int drm_connector_init(struct drm_device *dev,
2248extern void drm_connector_ida_destroy(void); 2578 struct drm_connector *connector,
2249extern int drm_connector_init(struct drm_device *dev, 2579 const struct drm_connector_funcs *funcs,
2250 struct drm_connector *connector, 2580 int connector_type);
2251 const struct drm_connector_funcs *funcs,
2252 int connector_type);
2253int drm_connector_register(struct drm_connector *connector); 2581int drm_connector_register(struct drm_connector *connector);
2254void drm_connector_unregister(struct drm_connector *connector); 2582void drm_connector_unregister(struct drm_connector *connector);
2255 2583
@@ -2263,28 +2591,23 @@ static inline unsigned drm_connector_index(struct drm_connector *connector)
2263extern int drm_connector_register_all(struct drm_device *dev); 2591extern int drm_connector_register_all(struct drm_device *dev);
2264extern void drm_connector_unregister_all(struct drm_device *dev); 2592extern void drm_connector_unregister_all(struct drm_device *dev);
2265 2593
2266extern int drm_bridge_add(struct drm_bridge *bridge);
2267extern void drm_bridge_remove(struct drm_bridge *bridge);
2268extern struct drm_bridge *of_drm_find_bridge(struct device_node *np);
2269extern int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge);
2270
2271bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
2272 const struct drm_display_mode *mode,
2273 struct drm_display_mode *adjusted_mode);
2274void drm_bridge_disable(struct drm_bridge *bridge);
2275void drm_bridge_post_disable(struct drm_bridge *bridge);
2276void drm_bridge_mode_set(struct drm_bridge *bridge,
2277 struct drm_display_mode *mode,
2278 struct drm_display_mode *adjusted_mode);
2279void drm_bridge_pre_enable(struct drm_bridge *bridge);
2280void drm_bridge_enable(struct drm_bridge *bridge);
2281
2282extern __printf(5, 6) 2594extern __printf(5, 6)
2283int drm_encoder_init(struct drm_device *dev, 2595int drm_encoder_init(struct drm_device *dev,
2284 struct drm_encoder *encoder, 2596 struct drm_encoder *encoder,
2285 const struct drm_encoder_funcs *funcs, 2597 const struct drm_encoder_funcs *funcs,
2286 int encoder_type, const char *name, ...); 2598 int encoder_type, const char *name, ...);
2287extern unsigned int drm_encoder_index(struct drm_encoder *encoder); 2599
2600/**
2601 * drm_encoder_index - find the index of a registered encoder
2602 * @encoder: encoder to find index for
2603 *
2604 * Given a registered encoder, return the index of that encoder within a DRM
2605 * device's list of encoders.
2606 */
2607static inline unsigned int drm_encoder_index(struct drm_encoder *encoder)
2608{
2609 return encoder->index;
2610}
2288 2611
2289/** 2612/**
2290 * drm_encoder_crtc_ok - can a given crtc drive a given encoder? 2613 * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
@@ -2315,17 +2638,22 @@ extern int drm_plane_init(struct drm_device *dev,
2315 const uint32_t *formats, unsigned int format_count, 2638 const uint32_t *formats, unsigned int format_count,
2316 bool is_primary); 2639 bool is_primary);
2317extern void drm_plane_cleanup(struct drm_plane *plane); 2640extern void drm_plane_cleanup(struct drm_plane *plane);
2318extern unsigned int drm_plane_index(struct drm_plane *plane); 2641
2642/**
2643 * drm_plane_index - find the index of a registered plane
2644 * @plane: plane to find index for
2645 *
2646 * Given a registered plane, return the index of that plane within a DRM
2647 * device's list of planes.
2648 */
2649static inline unsigned int drm_plane_index(struct drm_plane *plane)
2650{
2651 return plane->index;
2652}
2319extern struct drm_plane * drm_plane_from_index(struct drm_device *dev, int idx); 2653extern struct drm_plane * drm_plane_from_index(struct drm_device *dev, int idx);
2320extern void drm_plane_force_disable(struct drm_plane *plane); 2654extern void drm_plane_force_disable(struct drm_plane *plane);
2321extern int drm_plane_check_pixel_format(const struct drm_plane *plane,
2322 u32 format);
2323extern void drm_crtc_get_hv_timing(const struct drm_display_mode *mode, 2655extern void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
2324 int *hdisplay, int *vdisplay); 2656 int *hdisplay, int *vdisplay);
2325extern int drm_crtc_check_viewport(const struct drm_crtc *crtc,
2326 int x, int y,
2327 const struct drm_display_mode *mode,
2328 const struct drm_framebuffer *fb);
2329 2657
2330extern void drm_encoder_cleanup(struct drm_encoder *encoder); 2658extern void drm_encoder_cleanup(struct drm_encoder *encoder);
2331 2659
@@ -2336,16 +2664,6 @@ extern const char *drm_get_dvi_i_subconnector_name(int val);
2336extern const char *drm_get_dvi_i_select_name(int val); 2664extern const char *drm_get_dvi_i_select_name(int val);
2337extern const char *drm_get_tv_subconnector_name(int val); 2665extern const char *drm_get_tv_subconnector_name(int val);
2338extern const char *drm_get_tv_select_name(int val); 2666extern const char *drm_get_tv_select_name(int val);
2339extern void drm_fb_release(struct drm_file *file_priv);
2340extern void drm_property_destroy_user_blobs(struct drm_device *dev,
2341 struct drm_file *file_priv);
2342extern bool drm_probe_ddc(struct i2c_adapter *adapter);
2343extern struct edid *drm_get_edid(struct drm_connector *connector,
2344 struct i2c_adapter *adapter);
2345extern struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
2346 struct i2c_adapter *adapter);
2347extern struct edid *drm_edid_duplicate(const struct edid *edid);
2348extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
2349extern void drm_mode_config_init(struct drm_device *dev); 2667extern void drm_mode_config_init(struct drm_device *dev);
2350extern void drm_mode_config_reset(struct drm_device *dev); 2668extern void drm_mode_config_reset(struct drm_device *dev);
2351extern void drm_mode_config_cleanup(struct drm_device *dev); 2669extern void drm_mode_config_cleanup(struct drm_device *dev);
@@ -2369,13 +2687,6 @@ static inline bool drm_property_type_is(struct drm_property *property,
2369 return property->flags & type; 2687 return property->flags & type;
2370} 2688}
2371 2689
2372static inline bool drm_property_type_valid(struct drm_property *property)
2373{
2374 if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
2375 return !(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
2376 return !!(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
2377}
2378
2379extern int drm_object_property_set_value(struct drm_mode_object *obj, 2690extern int drm_object_property_set_value(struct drm_mode_object *obj,
2380 struct drm_property *property, 2691 struct drm_property *property,
2381 uint64_t val); 2692 uint64_t val);
@@ -2433,86 +2744,15 @@ extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
2433extern int drm_mode_create_aspect_ratio_property(struct drm_device *dev); 2744extern int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
2434extern int drm_mode_create_dirty_info_property(struct drm_device *dev); 2745extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
2435extern int drm_mode_create_suggested_offset_properties(struct drm_device *dev); 2746extern int drm_mode_create_suggested_offset_properties(struct drm_device *dev);
2436extern bool drm_property_change_valid_get(struct drm_property *property,
2437 uint64_t value, struct drm_mode_object **ref);
2438extern void drm_property_change_valid_put(struct drm_property *property,
2439 struct drm_mode_object *ref);
2440 2747
2441extern int drm_mode_connector_attach_encoder(struct drm_connector *connector, 2748extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
2442 struct drm_encoder *encoder); 2749 struct drm_encoder *encoder);
2443extern int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, 2750extern int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
2444 int gamma_size); 2751 int gamma_size);
2445extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
2446 uint32_t id, uint32_t type);
2447void drm_mode_object_reference(struct drm_mode_object *obj);
2448void drm_mode_object_unreference(struct drm_mode_object *obj);
2449 2752
2450/* IOCTLs */
2451extern int drm_mode_getresources(struct drm_device *dev,
2452 void *data, struct drm_file *file_priv);
2453extern int drm_mode_getplane_res(struct drm_device *dev, void *data,
2454 struct drm_file *file_priv);
2455extern int drm_mode_getcrtc(struct drm_device *dev,
2456 void *data, struct drm_file *file_priv);
2457extern int drm_mode_getconnector(struct drm_device *dev,
2458 void *data, struct drm_file *file_priv);
2459extern int drm_mode_set_config_internal(struct drm_mode_set *set); 2753extern int drm_mode_set_config_internal(struct drm_mode_set *set);
2460extern int drm_mode_setcrtc(struct drm_device *dev, 2754
2461 void *data, struct drm_file *file_priv);
2462extern int drm_mode_getplane(struct drm_device *dev,
2463 void *data, struct drm_file *file_priv);
2464extern int drm_mode_setplane(struct drm_device *dev,
2465 void *data, struct drm_file *file_priv);
2466extern int drm_mode_cursor_ioctl(struct drm_device *dev,
2467 void *data, struct drm_file *file_priv);
2468extern int drm_mode_cursor2_ioctl(struct drm_device *dev,
2469 void *data, struct drm_file *file_priv);
2470extern int drm_mode_addfb(struct drm_device *dev,
2471 void *data, struct drm_file *file_priv);
2472extern int drm_mode_addfb2(struct drm_device *dev,
2473 void *data, struct drm_file *file_priv);
2474extern uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth); 2755extern uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth);
2475extern int drm_mode_rmfb(struct drm_device *dev,
2476 void *data, struct drm_file *file_priv);
2477extern int drm_mode_getfb(struct drm_device *dev,
2478 void *data, struct drm_file *file_priv);
2479extern int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
2480 void *data, struct drm_file *file_priv);
2481
2482extern int drm_mode_getproperty_ioctl(struct drm_device *dev,
2483 void *data, struct drm_file *file_priv);
2484extern int drm_mode_getblob_ioctl(struct drm_device *dev,
2485 void *data, struct drm_file *file_priv);
2486extern int drm_mode_createblob_ioctl(struct drm_device *dev,
2487 void *data, struct drm_file *file_priv);
2488extern int drm_mode_destroyblob_ioctl(struct drm_device *dev,
2489 void *data, struct drm_file *file_priv);
2490extern int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
2491 void *data, struct drm_file *file_priv);
2492extern int drm_mode_getencoder(struct drm_device *dev,
2493 void *data, struct drm_file *file_priv);
2494extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
2495 void *data, struct drm_file *file_priv);
2496extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
2497 void *data, struct drm_file *file_priv);
2498extern u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
2499extern enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code);
2500extern bool drm_detect_hdmi_monitor(struct edid *edid);
2501extern bool drm_detect_monitor_audio(struct edid *edid);
2502extern bool drm_rgb_quant_range_selectable(struct edid *edid);
2503extern int drm_mode_page_flip_ioctl(struct drm_device *dev,
2504 void *data, struct drm_file *file_priv);
2505extern int drm_add_modes_noedid(struct drm_connector *connector,
2506 int hdisplay, int vdisplay);
2507extern void drm_set_preferred_mode(struct drm_connector *connector,
2508 int hpref, int vpref);
2509
2510extern int drm_edid_header_is_valid(const u8 *raw_edid);
2511extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
2512 bool *edid_corrupt);
2513extern bool drm_edid_is_valid(struct edid *edid);
2514extern void drm_edid_get_monitor_name(struct edid *edid, char *name,
2515 int buflen);
2516 2756
2517extern struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev, 2757extern struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
2518 char topology[8]); 2758 char topology[8]);
@@ -2520,41 +2760,24 @@ extern struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
2520 char topology[8]); 2760 char topology[8]);
2521extern void drm_mode_put_tile_group(struct drm_device *dev, 2761extern void drm_mode_put_tile_group(struct drm_device *dev,
2522 struct drm_tile_group *tg); 2762 struct drm_tile_group *tg);
2523struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
2524 int hsize, int vsize, int fresh,
2525 bool rb);
2526 2763
2527extern int drm_mode_create_dumb_ioctl(struct drm_device *dev,
2528 void *data, struct drm_file *file_priv);
2529extern int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
2530 void *data, struct drm_file *file_priv);
2531extern int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
2532 void *data, struct drm_file *file_priv);
2533extern int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
2534 struct drm_file *file_priv);
2535extern int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
2536 struct drm_file *file_priv);
2537extern int drm_mode_plane_set_obj_prop(struct drm_plane *plane, 2764extern int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
2538 struct drm_property *property, 2765 struct drm_property *property,
2539 uint64_t value); 2766 uint64_t value);
2540extern int drm_mode_atomic_ioctl(struct drm_device *dev, 2767
2541 void *data, struct drm_file *file_priv);
2542
2543extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
2544 int *bpp);
2545extern int drm_format_num_planes(uint32_t format);
2546extern int drm_format_plane_cpp(uint32_t format, int plane);
2547extern int drm_format_horz_chroma_subsampling(uint32_t format);
2548extern int drm_format_vert_chroma_subsampling(uint32_t format);
2549extern int drm_format_plane_width(int width, uint32_t format, int plane);
2550extern int drm_format_plane_height(int height, uint32_t format, int plane);
2551extern const char *drm_get_format_name(uint32_t format);
2552extern struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev, 2768extern struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
2553 unsigned int supported_rotations); 2769 unsigned int supported_rotations);
2554extern unsigned int drm_rotation_simplify(unsigned int rotation, 2770extern unsigned int drm_rotation_simplify(unsigned int rotation,
2555 unsigned int supported_rotations); 2771 unsigned int supported_rotations);
2556 2772extern void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
2773 uint degamma_lut_size,
2774 bool has_ctm,
2775 uint gamma_lut_size);
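drm_crtc_enable_color_mgmt() replaces the old drm_helper_crtc_enable_color_mgmt() (removed from drm_crtc_helper.h below) and gains the has_ctm flag; a sketch with made-up LUT sizes:

        /* 256-entry degamma LUT, a CTM, and a 1024-entry gamma LUT. */
        drm_crtc_enable_color_mgmt(crtc, 256, true, 1024);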
2557/* Helpers */ 2776/* Helpers */
2777struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
2778 uint32_t id, uint32_t type);
2779void drm_mode_object_reference(struct drm_mode_object *obj);
2780void drm_mode_object_unreference(struct drm_mode_object *obj);
2558 2781
2559static inline struct drm_plane *drm_plane_find(struct drm_device *dev, 2782static inline struct drm_plane *drm_plane_find(struct drm_device *dev,
2560 uint32_t id) 2783 uint32_t id)
@@ -2720,4 +2943,50 @@ assert_drm_connector_list_read_locked(struct drm_mode_config *mode_config)
2720 &fb->head != (&(dev)->mode_config.fb_list); \ 2943 &fb->head != (&(dev)->mode_config.fb_list); \
2721 fb = list_next_entry(fb, head)) 2944 fb = list_next_entry(fb, head))
2722 2945
2946/* drm_edid.c */
2947bool drm_probe_ddc(struct i2c_adapter *adapter);
2948struct edid *drm_get_edid(struct drm_connector *connector,
2949 struct i2c_adapter *adapter);
2950struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
2951 struct i2c_adapter *adapter);
2952struct edid *drm_edid_duplicate(const struct edid *edid);
2953int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
2954
2955u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
2956enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code);
2957bool drm_detect_hdmi_monitor(struct edid *edid);
2958bool drm_detect_monitor_audio(struct edid *edid);
2959bool drm_rgb_quant_range_selectable(struct edid *edid);
2960int drm_add_modes_noedid(struct drm_connector *connector,
2961 int hdisplay, int vdisplay);
2962void drm_set_preferred_mode(struct drm_connector *connector,
2963 int hpref, int vpref);
2964
2965int drm_edid_header_is_valid(const u8 *raw_edid);
2966bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
2967 bool *edid_corrupt);
2968bool drm_edid_is_valid(struct edid *edid);
2969void drm_edid_get_monitor_name(struct edid *edid, char *name,
2970 int buflen);
2971struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
2972 int hsize, int vsize, int fresh,
2973 bool rb);
2974
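A minimal get_modes() sketch using these drm_edid.c entry points; the foo_connector type and its ddc adapter are assumptions, while drm_mode_connector_update_edid_property() is the usual companion call:

        static int foo_connector_get_modes(struct drm_connector *connector)
        {
                struct foo_connector *foo = to_foo_connector(connector);
                struct edid *edid;
                int count = 0;

                edid = drm_get_edid(connector, foo->ddc);
                if (edid) {
                        drm_mode_connector_update_edid_property(connector, edid);
                        count = drm_add_edid_modes(connector, edid);
                        kfree(edid);
                }

                return count;
        }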
2975/* drm_bridge.c */
2976extern int drm_bridge_add(struct drm_bridge *bridge);
2977extern void drm_bridge_remove(struct drm_bridge *bridge);
2978extern struct drm_bridge *of_drm_find_bridge(struct device_node *np);
2979extern int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge);
2980
2981bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
2982 const struct drm_display_mode *mode,
2983 struct drm_display_mode *adjusted_mode);
2984void drm_bridge_disable(struct drm_bridge *bridge);
2985void drm_bridge_post_disable(struct drm_bridge *bridge);
2986void drm_bridge_mode_set(struct drm_bridge *bridge,
2987 struct drm_display_mode *mode,
2988 struct drm_display_mode *adjusted_mode);
2989void drm_bridge_pre_enable(struct drm_bridge *bridge);
2990void drm_bridge_enable(struct drm_bridge *bridge);
2991
2723#endif /* __DRM_CRTC_H__ */ 2992#endif /* __DRM_CRTC_H__ */
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 97fa894d4ee2..4b37afa2b73b 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -48,9 +48,6 @@ extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
48 struct drm_display_mode *mode, 48 struct drm_display_mode *mode,
49 int x, int y, 49 int x, int y,
50 struct drm_framebuffer *old_fb); 50 struct drm_framebuffer *old_fb);
51extern void drm_helper_crtc_enable_color_mgmt(struct drm_crtc *crtc,
52 int degamma_lut_size,
53 int gamma_lut_size);
54extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc); 51extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc);
55extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder); 52extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
56 53
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 9d03f167007b..4d85cf2874af 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -622,6 +622,7 @@ u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SI
622#define DP_BRANCH_OUI_HEADER_SIZE 0xc 622#define DP_BRANCH_OUI_HEADER_SIZE 0xc
623#define DP_RECEIVER_CAP_SIZE 0xf 623#define DP_RECEIVER_CAP_SIZE 0xf
624#define EDP_PSR_RECEIVER_CAP_SIZE 2 624#define EDP_PSR_RECEIVER_CAP_SIZE 2
625#define EDP_DISPLAY_CTL_CAP_SIZE 3
625 626
626void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]); 627void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
627void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]); 628void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
@@ -804,6 +805,7 @@ int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link);
804int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link); 805int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link);
805int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link); 806int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link);
806 807
808void drm_dp_aux_init(struct drm_dp_aux *aux);
807int drm_dp_aux_register(struct drm_dp_aux *aux); 809int drm_dp_aux_register(struct drm_dp_aux *aux);
808void drm_dp_aux_unregister(struct drm_dp_aux *aux); 810void drm_dp_aux_unregister(struct drm_dp_aux *aux);
809 811
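The new drm_dp_aux_init() splits AUX channel setup from registration, so drivers can use the channel internally before exposing it to userspace; a sketch with assumed foo_* names:

        foo->aux.name = "foo DP AUX";
        foo->aux.dev = dev->dev;
        foo->aux.transfer = foo_aux_transfer;
        drm_dp_aux_init(&foo->aux);

        /* ... later, e.g. from the connector's ->late_register hook: */
        ret = drm_dp_aux_register(&foo->aux);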
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
index fd0dde9f0a6d..f313211f8ed5 100644
--- a/include/drm/drm_fb_cma_helper.h
+++ b/include/drm/drm_fb_cma_helper.h
@@ -23,6 +23,7 @@ void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma);
23 23
24void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma); 24void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma);
25void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma); 25void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma);
26void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, int state);
26int drm_fbdev_cma_create_with_funcs(struct drm_fb_helper *helper, 27int drm_fbdev_cma_create_with_funcs(struct drm_fb_helper *helper,
27 struct drm_fb_helper_surface_size *sizes, 28 struct drm_fb_helper_surface_size *sizes,
28 const struct drm_framebuffer_funcs *funcs); 29 const struct drm_framebuffer_funcs *funcs);
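A sketch of wiring the new drm_fbdev_cma_set_suspend() into a driver's PM callbacks; the foo_drm type and its fbdev field are assumptions:

        static int foo_drm_suspend(struct device *dev)
        {
                struct foo_drm *foo = dev_get_drvdata(dev);

                drm_fbdev_cma_set_suspend(foo->fbdev, 1);
                return 0;
        }

        static int foo_drm_resume(struct device *dev)
        {
                struct foo_drm *foo = dev_get_drvdata(dev);

                drm_fbdev_cma_set_suspend(foo->fbdev, 0);
                return 0;
        }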
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 5b4aa35026a3..db8d4780eaa2 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -212,17 +212,6 @@ struct drm_fb_helper {
212 * needs to be reprobe when fbdev is in control again. 212 * needs to be reprobe when fbdev is in control again.
213 */ 213 */
214 bool delayed_hotplug; 214 bool delayed_hotplug;
215
216 /**
217 * @atomic:
218 *
219 * Use atomic updates for restore_fbdev_mode(), etc. This defaults to
220 * true if driver has DRIVER_ATOMIC feature flag, but drivers can
221 * override it to true after drm_fb_helper_init() if they support atomic
222 * modeset but do not yet advertise DRIVER_ATOMIC (note that fb-helper
223 * does not require ASYNC commits).
224 */
225 bool atomic;
226}; 215};
227 216
228#ifdef CONFIG_DRM_FBDEV_EMULATION 217#ifdef CONFIG_DRM_FBDEV_EMULATION
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
new file mode 100644
index 000000000000..7f90a396cf2b
--- /dev/null
+++ b/include/drm/drm_fourcc.h
@@ -0,0 +1,37 @@
1/*
2 * Copyright (c) 2016 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
3 *
4 * Permission to use, copy, modify, distribute, and sell this software and its
5 * documentation for any purpose is hereby granted without fee, provided that
6 * the above copyright notice appear in all copies and that both that copyright
7 * notice and this permission notice appear in supporting documentation, and
8 * that the name of the copyright holders not be used in advertising or
9 * publicity pertaining to distribution of the software without specific,
10 * written prior permission. The copyright holders make no representations
11 * about the suitability of this software for any purpose. It is provided "as
12 * is" without express or implied warranty.
13 *
14 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
15 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
16 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
17 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
18 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
19 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
20 * OF THIS SOFTWARE.
21 */
22#ifndef __DRM_FOURCC_H__
23#define __DRM_FOURCC_H__
24
25#include <linux/types.h>
26#include <uapi/drm/drm_fourcc.h>
27
28void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, int *bpp);
29int drm_format_num_planes(uint32_t format);
30int drm_format_plane_cpp(uint32_t format, int plane);
31int drm_format_horz_chroma_subsampling(uint32_t format);
32int drm_format_vert_chroma_subsampling(uint32_t format);
33int drm_format_plane_width(int width, uint32_t format, int plane);
34int drm_format_plane_height(int height, uint32_t format, int plane);
35const char *drm_get_format_name(uint32_t format);
36
37#endif /* __DRM_FOURCC_H__ */
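These format helpers previously lived in drm_crtc.h (see the removals above); only the include moves. A quick sketch of querying per-plane layout information:

        #include <drm/drm_fourcc.h>

        int nplanes = drm_format_num_planes(DRM_FORMAT_NV12);              /* 2 */
        int cpp = drm_format_plane_cpp(DRM_FORMAT_NV12, 1);                /* 2: interleaved CbCr */
        int hsub = drm_format_horz_chroma_subsampling(DRM_FORMAT_NV12);    /* 2 */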
diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h
index a5ef2c7e40f8..cf0e7d89bcdf 100644
--- a/include/drm/drm_legacy.h
+++ b/include/drm/drm_legacy.h
@@ -1,6 +1,8 @@
1#ifndef __DRM_DRM_LEGACY_H__ 1#ifndef __DRM_DRM_LEGACY_H__
2#define __DRM_DRM_LEGACY_H__ 2#define __DRM_DRM_LEGACY_H__
3 3
4#include <drm/drm_auth.h>
5
4/* 6/*
5 * Legacy driver interfaces for the Direct Rendering Manager 7 * Legacy driver interfaces for the Direct Rendering Manager
6 * 8 *
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 7a9840f8b38e..72f5b15e0738 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -180,6 +180,8 @@ struct mipi_dsi_device {
180 unsigned long mode_flags; 180 unsigned long mode_flags;
181}; 181};
182 182
183#define MIPI_DSI_MODULE_PREFIX "mipi-dsi:"
184
183static inline struct mipi_dsi_device *to_mipi_dsi_device(struct device *dev) 185static inline struct mipi_dsi_device *to_mipi_dsi_device(struct device *dev)
184{ 186{
185 return container_of(dev, struct mipi_dsi_device, dev); 187 return container_of(dev, struct mipi_dsi_device, dev);
@@ -263,6 +265,7 @@ int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start,
263 u16 end); 265 u16 end);
264int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start, 266int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start,
265 u16 end); 267 u16 end);
268int mipi_dsi_set_tear_scanline(struct mipi_dsi_device *dsi, u16 param);
266int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi); 269int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi);
267int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi, 270int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
268 enum mipi_dsi_dcs_tear_mode mode); 271 enum mipi_dsi_dcs_tear_mode mode);
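A sketch pairing the new scanline helper with the existing tear-on call; the scanline value is illustrative:

        ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
        if (ret < 0)
                return ret;

        ret = mipi_dsi_set_tear_scanline(dsi, 0x300);
        if (ret < 0)
                return ret;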
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index 625966a906f2..ff481770d76b 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -169,6 +169,8 @@ enum drm_mode_status {
169 * 169 *
170 * The horizontal and vertical timings are defined per the following diagram. 170 * The horizontal and vertical timings are defined per the following diagram.
171 * 171 *
172 * ::
173 *
172 * 174 *
173 * Active Front Sync Back 175 * Active Front Sync Back
174 * Region Porch Porch 176 * Region Porch Porch
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index d4619dc2eecb..b55f21857a98 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -736,6 +736,11 @@ struct drm_connector_helper_funcs {
736 * inspect dynamic configuration state should instead use 736 * inspect dynamic configuration state should instead use
737 * @atomic_best_encoder. 737 * @atomic_best_encoder.
738 * 738 *
739 * You can leave this function set to NULL if the connector is only
740 * attached to a single encoder and you are using the atomic helpers.
741 * In this case, the core will call drm_atomic_helper_best_encoder()
742 * for you.
743 *
739 * RETURNS: 744 * RETURNS:
740 * 745 *
741 * Encoder that should be used for the given connector and connector 746 * Encoder that should be used for the given connector and connector
@@ -752,8 +757,9 @@ struct drm_connector_helper_funcs {
752 * need to select the best encoder depending upon the desired 757 * need to select the best encoder depending upon the desired
753 * configuration and can't select it statically. 758 * configuration and can't select it statically.
754 * 759 *
755 * This function is used by drm_atomic_helper_check_modeset() and either 760 * This function is used by drm_atomic_helper_check_modeset().
756 * this or @best_encoder is required. 761 * If it is not implemented, the core will fall back to @best_encoder
762 * (or drm_atomic_helper_best_encoder() if @best_encoder is NULL).
757 * 763 *
758 * NOTE: 764 * NOTE:
759 * 765 *
@@ -925,4 +931,43 @@ static inline void drm_plane_helper_add(struct drm_plane *plane,
925 plane->helper_private = funcs; 931 plane->helper_private = funcs;
926} 932}
927 933
934/**
935 * struct drm_mode_config_helper_funcs - global modeset helper operations
936 *
937 * These helper functions are used by the atomic helpers.
938 */
939struct drm_mode_config_helper_funcs {
940 /**
941 * @atomic_commit_tail:
942 *
943 * This hook is used by the default atomic_commit() hook implemented in
944 * drm_atomic_helper_commit() together with the nonblocking commit
945 * helpers (see drm_atomic_helper_setup_commit() for a starting point)
946 * to implement blocking and nonblocking commits easily. It is not used
947 * by the atomic helpers if the driver implements its own atomic_commit().
948 *
949 * This hook should first commit the given atomic state to the hardware.
950 * But drivers can add more waiting calls at the start of their
951 * implementation, e.g. to wait for driver-internal requests for implicit
952 * syncing, before starting to commit the update to the hardware.
953 *
954 * After the atomic update is committed to the hardware this hook needs
955 * to call drm_atomic_helper_commit_hw_done(). Then wait for the update
956 * to be executed by the hardware, for example using
957 * drm_atomic_helper_wait_for_vblanks(), and then clean up the old
958 * framebuffers using drm_atomic_helper_cleanup_planes().
959 *
960 * When disabling a CRTC this hook _must_ stall for the commit to
961 * complete. Vblank waits don't work on a disabled CRTC, hence the core
962 * can't take care of this. And it also can't rely on the vblank event,
963 * since that can be signalled already when the screen shows black,
964 * which can happen much earlier than the last hardware access needed to
965 * shut off the display pipeline completely.
966 *
967 * This hook is optional, the default implementation is
968 * drm_atomic_helper_commit_tail().
969 */
970 void (*atomic_commit_tail)(struct drm_atomic_state *state);
971};
972
928#endif 973#endif
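A hedged sketch of a driver-specific atomic_commit_tail that mirrors the documented default flow (commit to hardware, signal hw_done, wait, clean up); the foo_* names are assumptions:

        static void foo_atomic_commit_tail(struct drm_atomic_state *state)
        {
                struct drm_device *dev = state->dev;

                drm_atomic_helper_commit_modeset_disables(dev, state);
                drm_atomic_helper_commit_planes(dev, state, false);
                drm_atomic_helper_commit_modeset_enables(dev, state);

                drm_atomic_helper_commit_hw_done(state);

                drm_atomic_helper_wait_for_vblanks(dev, state);
                drm_atomic_helper_cleanup_planes(dev, state);
        }

        static const struct drm_mode_config_helper_funcs foo_mode_config_helpers = {
                .atomic_commit_tail = foo_atomic_commit_tail,
        };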
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 4421f3f4ca8d..0e0c3573cce0 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -46,6 +46,7 @@ int drm_plane_helper_check_update(struct drm_plane *plane,
46 struct drm_rect *src, 46 struct drm_rect *src,
47 struct drm_rect *dest, 47 struct drm_rect *dest,
48 const struct drm_rect *clip, 48 const struct drm_rect *clip,
49 unsigned int rotation,
49 int min_scale, 50 int min_scale,
50 int max_scale, 51 int max_scale,
51 bool can_position, 52 bool can_position,
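Callers now thread the plane state's rotation through the check; a sketch where the rects and remaining arguments are assumed to be set up by the caller:

        ret = drm_plane_helper_check_update(plane, crtc, fb,
                                            &src, &dest, &clip,
                                            state->rotation,
                                            DRM_PLANE_HELPER_NO_SCALING,
                                            DRM_PLANE_HELPER_NO_SCALING,
                                            false, true, &visible);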
diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h
new file mode 100644
index 000000000000..269039722f91
--- /dev/null
+++ b/include/drm/drm_simple_kms_helper.h
@@ -0,0 +1,94 @@
1/*
2 * Copyright (C) 2016 Noralf Trønnes
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#ifndef __LINUX_DRM_SIMPLE_KMS_HELPER_H
11#define __LINUX_DRM_SIMPLE_KMS_HELPER_H
12
13struct drm_simple_display_pipe;
14
15/**
16 * struct drm_simple_display_pipe_funcs - helper operations for a simple
17 * display pipeline
18 */
19struct drm_simple_display_pipe_funcs {
20 /**
21 * @enable:
22 *
23 * This function should be used to enable the pipeline.
24 * It is called when the underlying crtc is enabled.
25 * This hook is optional.
26 */
27 void (*enable)(struct drm_simple_display_pipe *pipe,
28 struct drm_crtc_state *crtc_state);
29 /**
30 * @disable:
31 *
32 * This function should be used to disable the pipeline.
33 * It is called when the underlying crtc is disabled.
34 * This hook is optional.
35 */
36 void (*disable)(struct drm_simple_display_pipe *pipe);
37
38 /**
39 * @check:
40 *
41 * This function is called in the check phase of an atomic update,
42 * specifically when the underlying plane is checked.
43 * The simple display pipeline helpers already check that the plane is
44 * not scaled, fills the entire visible area and is always enabled
45 * when the crtc is also enabled.
46 * This hook is optional.
47 *
48 * RETURNS:
49 *
50 * 0 on success, -EINVAL if the state or the transition can't be
51 * supported, -ENOMEM on memory allocation failure and -EDEADLK if an
52 * attempt to obtain another state object ran into a &drm_modeset_lock
53 * deadlock.
54 */
55 int (*check)(struct drm_simple_display_pipe *pipe,
56 struct drm_plane_state *plane_state,
57 struct drm_crtc_state *crtc_state);
58 /**
59 * @update:
60 *
61 * This function is called when the underlying plane state is updated.
62 * This hook is optional.
63 */
64 void (*update)(struct drm_simple_display_pipe *pipe,
65 struct drm_plane_state *plane_state);
66};
67
68/**
69 * struct drm_simple_display_pipe - simple display pipeline
70 * @crtc: CRTC control structure
71 * @plane: Plane control structure
72 * @encoder: Encoder control structure
73 * @connector: Connector control structure
74 * @funcs: Pipeline control functions (optional)
75 *
76 * Simple display pipeline with plane, crtc and encoder collapsed into one
77 * entity. It should be initialized by calling drm_simple_display_pipe_init().
78 */
79struct drm_simple_display_pipe {
80 struct drm_crtc crtc;
81 struct drm_plane plane;
82 struct drm_encoder encoder;
83 struct drm_connector *connector;
84
85 const struct drm_simple_display_pipe_funcs *funcs;
86};
87
88int drm_simple_display_pipe_init(struct drm_device *dev,
89 struct drm_simple_display_pipe *pipe,
90 const struct drm_simple_display_pipe_funcs *funcs,
91 const uint32_t *formats, unsigned int format_count,
92 struct drm_connector *connector);
93
94#endif /* __LINUX_DRM_SIMPLE_KMS_HELPER_H */
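
Not from this patch, but a minimal usage sketch of the new helper: a driver with a single plane/CRTC/encoder might wire it up as below. The foo_* names are hypothetical and error handling is elided.

    static void foo_update(struct drm_simple_display_pipe *pipe,
                           struct drm_plane_state *plane_state)
    {
            /* flush pipe->plane.state->fb to the hardware here */
    }

    static const struct drm_simple_display_pipe_funcs foo_funcs = {
            .update = foo_update,   /* enable/disable/check are optional */
    };

    static const uint32_t foo_formats[] = { DRM_FORMAT_XRGB8888 };

    /* in modeset init, with drm and foo->connector already registered: */
    ret = drm_simple_display_pipe_init(drm, &foo->pipe, &foo_funcs,
                                       foo_formats, ARRAY_SIZE(foo_formats),
                                       foo->connector);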
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 595f85c392ac..b1755f8db36b 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -92,4 +92,7 @@ extern bool i915_gpu_turbo_disable(void);
92#define I845_TSEG_SIZE_512K (2 << 1) 92#define I845_TSEG_SIZE_512K (2 << 1)
93#define I845_TSEG_SIZE_1M (3 << 1) 93#define I845_TSEG_SIZE_1M (3 << 1)
94 94
95#define INTEL_BSM 0x5c
96#define INTEL_BSM_MASK (0xFFFF << 20)
97
95#endif /* _I915_DRM_H_ */ 98#endif /* _I915_DRM_H_ */
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
index 9e9bddaa58a5..f49edecd66a3 100644
--- a/include/drm/intel-gtt.h
+++ b/include/drm/intel-gtt.h
@@ -13,6 +13,9 @@ void intel_gmch_remove(void);
13bool intel_enable_gtt(void); 13bool intel_enable_gtt(void);
14 14
15void intel_gtt_chipset_flush(void); 15void intel_gtt_chipset_flush(void);
16void intel_gtt_insert_page(dma_addr_t addr,
17 unsigned int pg,
18 unsigned int flags);
16void intel_gtt_insert_sg_entries(struct sg_table *st, 19void intel_gtt_insert_sg_entries(struct sg_table *st,
17 unsigned int pg_start, 20 unsigned int pg_start,
18 unsigned int flags); 21 unsigned int flags);
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 576e4639ca60..314b3caa701c 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -65,6 +65,7 @@ struct coredump_params {
65 unsigned long limit; 65 unsigned long limit;
66 unsigned long mm_flags; 66 unsigned long mm_flags;
67 loff_t written; 67 loff_t written;
68 loff_t pos;
68}; 69};
69 70
70/* 71/*
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 19b14862d3e0..1b3b6e155392 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -279,6 +279,11 @@ struct ceph_osd_client {
279 struct workqueue_struct *notify_wq; 279 struct workqueue_struct *notify_wq;
280}; 280};
281 281
282static inline bool ceph_osdmap_flag(struct ceph_osd_client *osdc, int flag)
283{
284 return osdc->osdmap->flags & flag;
285}
286
282extern int ceph_osdc_setup(void); 287extern int ceph_osdc_setup(void);
283extern void ceph_osdc_cleanup(void); 288extern void ceph_osdc_cleanup(void);
284 289
diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h
index ddc426b22d81..9ccf4dbe55f8 100644
--- a/include/linux/ceph/osdmap.h
+++ b/include/linux/ceph/osdmap.h
@@ -189,11 +189,6 @@ static inline bool ceph_osd_is_down(struct ceph_osdmap *map, int osd)
189 return !ceph_osd_is_up(map, osd); 189 return !ceph_osd_is_up(map, osd);
190} 190}
191 191
192static inline bool ceph_osdmap_flag(struct ceph_osdmap *map, int flag)
193{
194 return map && (map->flags & flag);
195}
196
197extern char *ceph_osdmap_state_str(char *str, int len, int state); 192extern char *ceph_osdmap_state_str(char *str, int len, int state);
198extern u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd); 193extern u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd);
199 194
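
A usage sketch (not from this series): the moved helper now starts from the osd_client and, unlike the removed version, no longer NULL-checks the map, so callers are expected to hold a valid osdmap. CEPH_OSDMAP_FULL is an existing flag:

    if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL))
            return -ENOSPC;         /* cluster reports itself full */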
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 0c72204c75fc..fb39d5add173 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -25,7 +25,7 @@
25#define CLK_SET_PARENT_GATE BIT(1) /* must be gated across re-parent */ 25#define CLK_SET_PARENT_GATE BIT(1) /* must be gated across re-parent */
26#define CLK_SET_RATE_PARENT BIT(2) /* propagate rate change up one level */ 26#define CLK_SET_RATE_PARENT BIT(2) /* propagate rate change up one level */
27#define CLK_IGNORE_UNUSED BIT(3) /* do not gate even if unused */ 27#define CLK_IGNORE_UNUSED BIT(3) /* do not gate even if unused */
28#define CLK_IS_ROOT BIT(4) /* Deprecated: Don't use */ 28 /* unused */
29#define CLK_IS_BASIC BIT(5) /* Basic clk, can't do a to_clk_foo() */ 29#define CLK_IS_BASIC BIT(5) /* Basic clk, can't do a to_clk_foo() */
30#define CLK_GET_RATE_NOCACHE BIT(6) /* do not use the cached clk rate */ 30#define CLK_GET_RATE_NOCACHE BIT(6) /* do not use the cached clk rate */
31#define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */ 31#define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 786ad32631a6..07b83d32f66c 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -152,6 +152,8 @@ extern void cpuidle_disable_device(struct cpuidle_device *dev);
152extern int cpuidle_play_dead(void); 152extern int cpuidle_play_dead(void);
153 153
154extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev); 154extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
155static inline struct cpuidle_device *cpuidle_get_device(void)
156{return __this_cpu_read(cpuidle_devices); }
155#else 157#else
156static inline void disable_cpuidle(void) { } 158static inline void disable_cpuidle(void) { }
157static inline bool cpuidle_not_available(struct cpuidle_driver *drv, 159static inline bool cpuidle_not_available(struct cpuidle_driver *drv,
@@ -187,6 +189,7 @@ static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
187static inline int cpuidle_play_dead(void) {return -ENODEV; } 189static inline int cpuidle_play_dead(void) {return -ENODEV; }
188static inline struct cpuidle_driver *cpuidle_get_cpu_driver( 190static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
189 struct cpuidle_device *dev) {return NULL; } 191 struct cpuidle_device *dev) {return NULL; }
192static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; }
190#endif 193#endif
191 194
192#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND) 195#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND)
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 484c8792da82..f53fa055021a 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -212,6 +212,7 @@ struct dentry_operations {
212#define DCACHE_OP_REAL 0x08000000 212#define DCACHE_OP_REAL 0x08000000
213 213
214#define DCACHE_PAR_LOOKUP 0x10000000 /* being looked up (with parent locked shared) */ 214#define DCACHE_PAR_LOOKUP 0x10000000 /* being looked up (with parent locked shared) */
215#define DCACHE_DENTRY_CURSOR 0x20000000
215 216
216extern seqlock_t rename_lock; 217extern seqlock_t rename_lock;
217 218
@@ -575,5 +576,17 @@ static inline struct inode *vfs_select_inode(struct dentry *dentry,
575 return inode; 576 return inode;
576} 577}
577 578
579/**
580 * d_real_inode - Return the real inode
581 * @dentry: The dentry to query
582 *
583 * If the dentry is on a union/overlay, then return the underlying, real inode.
584 * Otherwise return d_inode().
585 */
586static inline struct inode *d_real_inode(struct dentry *dentry)
587{
588 return d_backing_inode(d_real(dentry));
589}
590
578 591
579#endif /* __LINUX_DCACHE_H */ 592#endif /* __LINUX_DCACHE_H */
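
A one-line usage sketch for the new helper, e.g. where a path may resolve through overlayfs (file is an assumed struct file pointer):

    struct inode *inode = d_real_inode(file->f_path.dentry);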
diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h
index 5871f292b596..277ab9af9ac2 100644
--- a/include/linux/devpts_fs.h
+++ b/include/linux/devpts_fs.h
@@ -15,13 +15,12 @@
15 15
16#include <linux/errno.h> 16#include <linux/errno.h>
17 17
18struct pts_fs_info;
19
20#ifdef CONFIG_UNIX98_PTYS 18#ifdef CONFIG_UNIX98_PTYS
21 19
22/* Look up a pts fs info and get a ref to it */ 20struct pts_fs_info;
23struct pts_fs_info *devpts_get_ref(struct inode *, struct file *); 21
24void devpts_put_ref(struct pts_fs_info *); 22struct pts_fs_info *devpts_acquire(struct file *);
23void devpts_release(struct pts_fs_info *);
25 24
26int devpts_new_index(struct pts_fs_info *); 25int devpts_new_index(struct pts_fs_info *);
27void devpts_kill_index(struct pts_fs_info *, int); 26void devpts_kill_index(struct pts_fs_info *, int);
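
A sketch of the renamed reference cycle, assuming devpts_acquire() keeps reporting failure via ERR_PTR as the old devpts_get_ref() path did:

    struct pts_fs_info *fsi;
    int index;

    fsi = devpts_acquire(filp);
    if (IS_ERR(fsi))
            return PTR_ERR(fsi);

    index = devpts_new_index(fsi);
    /* ... create and use the pty ... */
    devpts_kill_index(fsi, index);
    devpts_release(fsi);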
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 3fe90d494edb..e0b0741ae671 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -112,19 +112,24 @@ struct dma_buf_ops {
112 * @file: file pointer used for sharing buffers across, and for refcounting. 112 * @file: file pointer used for sharing buffers across, and for refcounting.
113 * @attachments: list of dma_buf_attachment that denotes all devices attached. 113 * @attachments: list of dma_buf_attachment that denotes all devices attached.
114 * @ops: dma_buf_ops associated with this buffer object. 114 * @ops: dma_buf_ops associated with this buffer object.
115 * @lock: used internally to serialize list manipulation, attach/detach and vmap/unmap
116 * @vmapping_counter: used internally to refcnt the vmaps
117 * @vmap_ptr: the current vmap ptr if vmapping_counter > 0
115 * @exp_name: name of the exporter; useful for debugging. 118 * @exp_name: name of the exporter; useful for debugging.
116 * @owner: pointer to exporter module; used for refcounting when exporter is a 119 * @owner: pointer to exporter module; used for refcounting when exporter is a
117 * kernel module. 120 * kernel module.
118 * @list_node: node for dma_buf accounting and debugging. 121 * @list_node: node for dma_buf accounting and debugging.
119 * @priv: exporter specific private data for this buffer object. 122 * @priv: exporter specific private data for this buffer object.
120 * @resv: reservation object linked to this dma-buf 123 * @resv: reservation object linked to this dma-buf
124 * @poll: for userspace poll support
125 * @cb_excl: for userspace poll support
126 * @cb_shared: for userspace poll support
121 */ 127 */
122struct dma_buf { 128struct dma_buf {
123 size_t size; 129 size_t size;
124 struct file *file; 130 struct file *file;
125 struct list_head attachments; 131 struct list_head attachments;
126 const struct dma_buf_ops *ops; 132 const struct dma_buf_ops *ops;
127 /* mutex to serialize list manipulation, attach/detach and vmap/unmap */
128 struct mutex lock; 133 struct mutex lock;
129 unsigned vmapping_counter; 134 unsigned vmapping_counter;
130 void *vmap_ptr; 135 void *vmap_ptr;
@@ -188,9 +193,11 @@ struct dma_buf_export_info {
188 193
189/** 194/**
190 * helper macro for exporters; zeros and fills in most common values 195 * helper macro for exporters; zeros and fills in most common values
196 *
197 * @name: export-info name
191 */ 198 */
192#define DEFINE_DMA_BUF_EXPORT_INFO(a) \ 199#define DEFINE_DMA_BUF_EXPORT_INFO(name) \
193 struct dma_buf_export_info a = { .exp_name = KBUILD_MODNAME, \ 200 struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \
194 .owner = THIS_MODULE } 201 .owner = THIS_MODULE }
195 202
196/** 203/**
@@ -235,6 +242,4 @@ int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
235 unsigned long); 242 unsigned long);
236void *dma_buf_vmap(struct dma_buf *); 243void *dma_buf_vmap(struct dma_buf *);
237void dma_buf_vunmap(struct dma_buf *, void *vaddr); 244void dma_buf_vunmap(struct dma_buf *, void *vaddr);
238int dma_buf_debugfs_create_file(const char *name,
239 int (*write)(struct seq_file *));
240#endif /* __DMA_BUF_H__ */ 245#endif /* __DMA_BUF_H__ */
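
A sketch of the macro in use with the clarified parameter name; the ops, size, and priv values are exporter-specific assumptions:

    DEFINE_DMA_BUF_EXPORT_INFO(exp_info);  /* zeroed, exp_name/owner pre-set */

    exp_info.ops = &my_dmabuf_ops;
    exp_info.size = obj->size;
    exp_info.flags = O_RDWR;
    exp_info.priv = obj;

    dmabuf = dma_buf_export(&exp_info);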
diff --git a/include/linux/efi.h b/include/linux/efi.h
index c2db3ca22217..f196dd0b0f2f 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -1005,7 +1005,7 @@ extern int efi_memattr_apply_permissions(struct mm_struct *mm,
1005/* Iterate through an efi_memory_map */ 1005/* Iterate through an efi_memory_map */
1006#define for_each_efi_memory_desc_in_map(m, md) \ 1006#define for_each_efi_memory_desc_in_map(m, md) \
1007 for ((md) = (m)->map; \ 1007 for ((md) = (m)->map; \
1008 (md) <= (efi_memory_desc_t *)((m)->map_end - (m)->desc_size); \ 1008 ((void *)(md) + (m)->desc_size) <= (m)->map_end; \
1009 (md) = (void *)(md) + (m)->desc_size) 1009 (md) = (void *)(md) + (m)->desc_size)
1010 1010
1011/** 1011/**
diff --git a/include/linux/fence-array.h b/include/linux/fence-array.h
new file mode 100644
index 000000000000..86baaa45567c
--- /dev/null
+++ b/include/linux/fence-array.h
@@ -0,0 +1,73 @@
1/*
 2 * fence-array: aggregates fences to be waited on together
3 *
4 * Copyright (C) 2016 Collabora Ltd
5 * Copyright (C) 2016 Advanced Micro Devices, Inc.
6 * Authors:
7 * Gustavo Padovan <gustavo@padovan.org>
8 * Christian König <christian.koenig@amd.com>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License version 2 as published by
12 * the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 */
19
20#ifndef __LINUX_FENCE_ARRAY_H
21#define __LINUX_FENCE_ARRAY_H
22
23#include <linux/fence.h>
24
25/**
26 * struct fence_array_cb - callback helper for fence array
27 * @cb: fence callback structure for signaling
28 * @array: reference to the parent fence array object
29 */
30struct fence_array_cb {
31 struct fence_cb cb;
32 struct fence_array *array;
33};
34
35/**
36 * struct fence_array - fence to represent an array of fences
37 * @base: fence base class
38 * @lock: spinlock for fence handling
39 * @num_fences: number of fences in the array
40 * @num_pending: fences in the array still pending
41 * @fences: array of the fences
42 */
43struct fence_array {
44 struct fence base;
45
46 spinlock_t lock;
47 unsigned num_fences;
48 atomic_t num_pending;
49 struct fence **fences;
50};
51
52extern const struct fence_ops fence_array_ops;
53
54/**
55 * to_fence_array - cast a fence to a fence_array
56 * @fence: fence to cast to a fence_array
57 *
58 * Returns NULL if the fence is not a fence_array,
59 * or the fence_array otherwise.
60 */
61static inline struct fence_array *to_fence_array(struct fence *fence)
62{
63 if (fence->ops != &fence_array_ops)
64 return NULL;
65
66 return container_of(fence, struct fence_array, base);
67}
68
69struct fence_array *fence_array_create(int num_fences, struct fence **fences,
70 u64 context, unsigned seqno,
71 bool signal_on_any);
72
73#endif /* __LINUX_FENCE_ARRAY_H */
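
A sketch of aggregating two fences into a single waitable fence. The fences array is assumed to be kmalloc'd, since the fence_array takes ownership of both the array and the references it holds (allocation-failure checks elided):

    struct fence **fences;
    struct fence_array *array;

    fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
    fences[0] = fence_get(fence_a);
    fences[1] = fence_get(fence_b);

    array = fence_array_create(2, fences, fence_context_alloc(1), 1,
                               false);  /* signal when all have signaled */
    if (!array)
            return -ENOMEM;             /* caller still owns fences[] here */

    fence_wait(&array->base, false);
    fence_put(&array->base);            /* releases the contained fences */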
diff --git a/include/linux/fence.h b/include/linux/fence.h
index 2b17698b60b8..44d945e96473 100644
--- a/include/linux/fence.h
+++ b/include/linux/fence.h
@@ -49,6 +49,8 @@ struct fence_cb;
49 * @timestamp: Timestamp when the fence was signaled. 49 * @timestamp: Timestamp when the fence was signaled.
50 * @status: Optional, only valid if < 0, must be set before calling 50 * @status: Optional, only valid if < 0, must be set before calling
51 * fence_signal, indicates that the fence has completed with an error. 51 * fence_signal, indicates that the fence has completed with an error.
 52 * @child_list: list of child fences
53 * @active_list: list of active fences
52 * 54 *
53 * the flags member must be manipulated and read using the appropriate 55 * the flags member must be manipulated and read using the appropriate
54 * atomic ops (bit_*), so taking the spinlock will not be needed most 56 * atomic ops (bit_*), so taking the spinlock will not be needed most
@@ -75,7 +77,8 @@ struct fence {
75 struct rcu_head rcu; 77 struct rcu_head rcu;
76 struct list_head cb_list; 78 struct list_head cb_list;
77 spinlock_t *lock; 79 spinlock_t *lock;
78 unsigned context, seqno; 80 u64 context;
81 unsigned seqno;
79 unsigned long flags; 82 unsigned long flags;
80 ktime_t timestamp; 83 ktime_t timestamp;
81 int status; 84 int status;
@@ -178,7 +181,7 @@ struct fence_ops {
178}; 181};
179 182
180void fence_init(struct fence *fence, const struct fence_ops *ops, 183void fence_init(struct fence *fence, const struct fence_ops *ops,
181 spinlock_t *lock, unsigned context, unsigned seqno); 184 spinlock_t *lock, u64 context, unsigned seqno);
182 185
183void fence_release(struct kref *kref); 186void fence_release(struct kref *kref);
184void fence_free(struct fence *fence); 187void fence_free(struct fence *fence);
@@ -352,27 +355,27 @@ static inline signed long fence_wait(struct fence *fence, bool intr)
352 return ret < 0 ? ret : 0; 355 return ret < 0 ? ret : 0;
353} 356}
354 357
355unsigned fence_context_alloc(unsigned num); 358u64 fence_context_alloc(unsigned num);
356 359
357#define FENCE_TRACE(f, fmt, args...) \ 360#define FENCE_TRACE(f, fmt, args...) \
358 do { \ 361 do { \
359 struct fence *__ff = (f); \ 362 struct fence *__ff = (f); \
360 if (config_enabled(CONFIG_FENCE_TRACE)) \ 363 if (config_enabled(CONFIG_FENCE_TRACE)) \
361 pr_info("f %u#%u: " fmt, \ 364 pr_info("f %llu#%u: " fmt, \
362 __ff->context, __ff->seqno, ##args); \ 365 __ff->context, __ff->seqno, ##args); \
363 } while (0) 366 } while (0)
364 367
365#define FENCE_WARN(f, fmt, args...) \ 368#define FENCE_WARN(f, fmt, args...) \
366 do { \ 369 do { \
367 struct fence *__ff = (f); \ 370 struct fence *__ff = (f); \
368 pr_warn("f %u#%u: " fmt, __ff->context, __ff->seqno, \ 371 pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
369 ##args); \ 372 ##args); \
370 } while (0) 373 } while (0)
371 374
372#define FENCE_ERR(f, fmt, args...) \ 375#define FENCE_ERR(f, fmt, args...) \
373 do { \ 376 do { \
374 struct fence *__ff = (f); \ 377 struct fence *__ff = (f); \
375 pr_err("f %u#%u: " fmt, __ff->context, __ff->seqno, \ 378 pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
376 ##args); \ 379 ##args); \
377 } while (0) 380 } while (0)
378 381
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 604e1526cd00..13ba552e6c09 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -241,7 +241,7 @@ struct fscache_cache_ops {
241 241
242 /* check the consistency between the backing cache and the FS-Cache 242 /* check the consistency between the backing cache and the FS-Cache
243 * cookie */ 243 * cookie */
244 bool (*check_consistency)(struct fscache_operation *op); 244 int (*check_consistency)(struct fscache_operation *op);
245 245
246 /* store the updated auxiliary data on an object */ 246 /* store the updated auxiliary data on an object */
247 void (*update_object)(struct fscache_object *object); 247 void (*update_object)(struct fscache_object *object);
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index d029ffac0d69..99403b19092f 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -223,6 +223,8 @@ struct st_sensor_settings {
223 * @get_irq_data_ready: Function to get the IRQ used for data ready signal. 223 * @get_irq_data_ready: Function to get the IRQ used for data ready signal.
224 * @tf: Transfer function structure used by I/O operations. 224 * @tf: Transfer function structure used by I/O operations.
225 * @tb: Transfer buffers and mutex used by I/O operations. 225 * @tb: Transfer buffers and mutex used by I/O operations.
226 * @hw_irq_trigger: whether the sensor's hardware interrupt is in use.
227 * @hw_timestamp: Latest timestamp from the interrupt handler, when in use.
226 */ 228 */
227struct st_sensor_data { 229struct st_sensor_data {
228 struct device *dev; 230 struct device *dev;
@@ -247,6 +249,9 @@ struct st_sensor_data {
247 249
248 const struct st_sensor_transfer_function *tf; 250 const struct st_sensor_transfer_function *tf;
249 struct st_sensor_transfer_buffer tb; 251 struct st_sensor_transfer_buffer tb;
252
253 bool hw_irq_trigger;
254 s64 hw_timestamp;
250}; 255};
251 256
252#ifdef CONFIG_IIO_BUFFER 257#ifdef CONFIG_IIO_BUFFER
@@ -260,7 +265,8 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
260 const struct iio_trigger_ops *trigger_ops); 265 const struct iio_trigger_ops *trigger_ops);
261 266
262void st_sensors_deallocate_trigger(struct iio_dev *indio_dev); 267void st_sensors_deallocate_trigger(struct iio_dev *indio_dev);
263 268int st_sensors_validate_device(struct iio_trigger *trig,
269 struct iio_dev *indio_dev);
264#else 270#else
265static inline int st_sensors_allocate_trigger(struct iio_dev *indio_dev, 271static inline int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
266 const struct iio_trigger_ops *trigger_ops) 272 const struct iio_trigger_ops *trigger_ops)
@@ -271,6 +277,7 @@ static inline void st_sensors_deallocate_trigger(struct iio_dev *indio_dev)
271{ 277{
272 return; 278 return;
273} 279}
280#define st_sensors_validate_device NULL
274#endif 281#endif
275 282
276int st_sensors_init_sensor(struct iio_dev *indio_dev, 283int st_sensors_init_sensor(struct iio_dev *indio_dev,
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index f2cb8d45513d..f8834f820ec2 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -190,7 +190,7 @@ extern struct task_group root_task_group;
190#define INIT_TASK(tsk) \ 190#define INIT_TASK(tsk) \
191{ \ 191{ \
192 .state = 0, \ 192 .state = 0, \
193 .stack = &init_thread_info, \ 193 .stack = init_stack, \
194 .usage = ATOMIC_INIT(2), \ 194 .usage = ATOMIC_INIT(2), \
195 .flags = PF_KTHREAD, \ 195 .flags = PF_KTHREAD, \
196 .prio = MAX_PRIO-20, \ 196 .prio = MAX_PRIO-20, \
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index e399029b68c5..645ad06b5d52 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -100,14 +100,16 @@ io_mapping_unmap_atomic(void __iomem *vaddr)
100} 100}
101 101
102static inline void __iomem * 102static inline void __iomem *
103io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) 103io_mapping_map_wc(struct io_mapping *mapping,
104 unsigned long offset,
105 unsigned long size)
104{ 106{
105 resource_size_t phys_addr; 107 resource_size_t phys_addr;
106 108
107 BUG_ON(offset >= mapping->size); 109 BUG_ON(offset >= mapping->size);
108 phys_addr = mapping->base + offset; 110 phys_addr = mapping->base + offset;
109 111
110 return ioremap_wc(phys_addr, PAGE_SIZE); 112 return ioremap_wc(phys_addr, size);
111} 113}
112 114
113static inline void 115static inline void
@@ -155,7 +157,9 @@ io_mapping_unmap_atomic(void __iomem *vaddr)
155 157
156/* Non-atomic map/unmap */ 158/* Non-atomic map/unmap */
157static inline void __iomem * 159static inline void __iomem *
158io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) 160io_mapping_map_wc(struct io_mapping *mapping,
161 unsigned long offset,
162 unsigned long size)
159{ 163{
160 return ((char __force __iomem *) mapping) + offset; 164 return ((char __force __iomem *) mapping) + offset;
161} 165}
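
Existing callers keep the old single-page behaviour by passing PAGE_SIZE for the new size argument:

    void __iomem *vaddr;

    vaddr = io_mapping_map_wc(mapping, offset, PAGE_SIZE);
    /* ... write-combined MMIO access ... */
    io_mapping_unmap(vaddr);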
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index bfbd707de390..dc493e0f0ff7 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -305,12 +305,12 @@
305#define ICC_SGI1R_AFFINITY_1_SHIFT 16 305#define ICC_SGI1R_AFFINITY_1_SHIFT 16
306#define ICC_SGI1R_AFFINITY_1_MASK (0xff << ICC_SGI1R_AFFINITY_1_SHIFT) 306#define ICC_SGI1R_AFFINITY_1_MASK (0xff << ICC_SGI1R_AFFINITY_1_SHIFT)
307#define ICC_SGI1R_SGI_ID_SHIFT 24 307#define ICC_SGI1R_SGI_ID_SHIFT 24
308#define ICC_SGI1R_SGI_ID_MASK (0xff << ICC_SGI1R_SGI_ID_SHIFT) 308#define ICC_SGI1R_SGI_ID_MASK (0xfULL << ICC_SGI1R_SGI_ID_SHIFT)
309#define ICC_SGI1R_AFFINITY_2_SHIFT 32 309#define ICC_SGI1R_AFFINITY_2_SHIFT 32
310#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT) 310#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT)
311#define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40 311#define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40
312#define ICC_SGI1R_AFFINITY_3_SHIFT 48 312#define ICC_SGI1R_AFFINITY_3_SHIFT 48
313#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT) 313#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT)
314 314
315#include <asm/arch_gicv3.h> 315#include <asm/arch_gicv3.h>
316 316
diff --git a/include/linux/isa.h b/include/linux/isa.h
index 5ab85281230b..f2d0258414cf 100644
--- a/include/linux/isa.h
+++ b/include/linux/isa.h
@@ -6,6 +6,7 @@
6#define __LINUX_ISA_H 6#define __LINUX_ISA_H
7 7
8#include <linux/device.h> 8#include <linux/device.h>
9#include <linux/errno.h>
9#include <linux/kernel.h> 10#include <linux/kernel.h>
10 11
11struct isa_driver { 12struct isa_driver {
@@ -22,13 +23,13 @@ struct isa_driver {
22 23
23#define to_isa_driver(x) container_of((x), struct isa_driver, driver) 24#define to_isa_driver(x) container_of((x), struct isa_driver, driver)
24 25
25#ifdef CONFIG_ISA 26#ifdef CONFIG_ISA_BUS_API
26int isa_register_driver(struct isa_driver *, unsigned int); 27int isa_register_driver(struct isa_driver *, unsigned int);
27void isa_unregister_driver(struct isa_driver *); 28void isa_unregister_driver(struct isa_driver *);
28#else 29#else
29static inline int isa_register_driver(struct isa_driver *d, unsigned int i) 30static inline int isa_register_driver(struct isa_driver *d, unsigned int i)
30{ 31{
31 return 0; 32 return -ENODEV;
32} 33}
33 34
34static inline void isa_unregister_driver(struct isa_driver *d) 35static inline void isa_unregister_driver(struct isa_driver *d)
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 0536524bb9eb..68904469fba1 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -117,13 +117,18 @@ struct module;
117 117
118#include <linux/atomic.h> 118#include <linux/atomic.h>
119 119
120#ifdef HAVE_JUMP_LABEL
121
120static inline int static_key_count(struct static_key *key) 122static inline int static_key_count(struct static_key *key)
121{ 123{
122 return atomic_read(&key->enabled); 124 /*
125 * -1 means the first static_key_slow_inc() is in progress.
126 * static_key_enabled() must return true, so return 1 here.
127 */
128 int n = atomic_read(&key->enabled);
129 return n >= 0 ? n : 1;
123} 130}
124 131
125#ifdef HAVE_JUMP_LABEL
126
127#define JUMP_TYPE_FALSE 0UL 132#define JUMP_TYPE_FALSE 0UL
128#define JUMP_TYPE_TRUE 1UL 133#define JUMP_TYPE_TRUE 1UL
129#define JUMP_TYPE_MASK 1UL 134#define JUMP_TYPE_MASK 1UL
@@ -162,6 +167,11 @@ extern void jump_label_apply_nops(struct module *mod);
162 167
163#else /* !HAVE_JUMP_LABEL */ 168#else /* !HAVE_JUMP_LABEL */
164 169
170static inline int static_key_count(struct static_key *key)
171{
172 return atomic_read(&key->enabled);
173}
174
165static __always_inline void jump_label_init(void) 175static __always_inline void jump_label_init(void)
166{ 176{
167 static_key_initialized = true; 177 static_key_initialized = true;
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 611927f5870d..ac4b3c46a84d 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -59,14 +59,13 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object);
59 59
60void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags); 60void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
61void kasan_kfree_large(const void *ptr); 61void kasan_kfree_large(const void *ptr);
62void kasan_kfree(void *ptr); 62void kasan_poison_kfree(void *ptr);
63void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size, 63void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size,
64 gfp_t flags); 64 gfp_t flags);
65void kasan_krealloc(const void *object, size_t new_size, gfp_t flags); 65void kasan_krealloc(const void *object, size_t new_size, gfp_t flags);
66 66
67void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags); 67void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
68bool kasan_slab_free(struct kmem_cache *s, void *object); 68bool kasan_slab_free(struct kmem_cache *s, void *object);
69void kasan_poison_slab_free(struct kmem_cache *s, void *object);
70 69
71struct kasan_cache { 70struct kasan_cache {
72 int alloc_meta_offset; 71 int alloc_meta_offset;
@@ -76,6 +75,9 @@ struct kasan_cache {
76int kasan_module_alloc(void *addr, size_t size); 75int kasan_module_alloc(void *addr, size_t size);
77void kasan_free_shadow(const struct vm_struct *vm); 76void kasan_free_shadow(const struct vm_struct *vm);
78 77
78size_t ksize(const void *);
79static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); }
80
79#else /* CONFIG_KASAN */ 81#else /* CONFIG_KASAN */
80 82
81static inline void kasan_unpoison_shadow(const void *address, size_t size) {} 83static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
@@ -102,7 +104,7 @@ static inline void kasan_poison_object_data(struct kmem_cache *cache,
102 104
103static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {} 105static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {}
104static inline void kasan_kfree_large(const void *ptr) {} 106static inline void kasan_kfree_large(const void *ptr) {}
105static inline void kasan_kfree(void *ptr) {} 107static inline void kasan_poison_kfree(void *ptr) {}
106static inline void kasan_kmalloc(struct kmem_cache *s, const void *object, 108static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
107 size_t size, gfp_t flags) {} 109 size_t size, gfp_t flags) {}
108static inline void kasan_krealloc(const void *object, size_t new_size, 110static inline void kasan_krealloc(const void *object, size_t new_size,
@@ -114,11 +116,12 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object)
114{ 116{
115 return false; 117 return false;
116} 118}
117static inline void kasan_poison_slab_free(struct kmem_cache *s, void *object) {}
118 119
119static inline int kasan_module_alloc(void *addr, size_t size) { return 0; } 120static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
120static inline void kasan_free_shadow(const struct vm_struct *vm) {} 121static inline void kasan_free_shadow(const struct vm_struct *vm) {}
121 122
123static inline void kasan_unpoison_slab(const void *ptr) { }
124
122#endif /* CONFIG_KASAN */ 125#endif /* CONFIG_KASAN */
123 126
124#endif /* LINUX_KASAN_H */ 127#endif /* LINUX_KASAN_H */
diff --git a/include/linux/leds.h b/include/linux/leds.h
index d2b13066e781..e5e7f2e80a54 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -42,15 +42,16 @@ struct led_classdev {
42#define LED_UNREGISTERING (1 << 1) 42#define LED_UNREGISTERING (1 << 1)
43 /* Upper 16 bits reflect control information */ 43 /* Upper 16 bits reflect control information */
44#define LED_CORE_SUSPENDRESUME (1 << 16) 44#define LED_CORE_SUSPENDRESUME (1 << 16)
45#define LED_BLINK_ONESHOT (1 << 17) 45#define LED_BLINK_SW (1 << 17)
46#define LED_BLINK_ONESHOT_STOP (1 << 18) 46#define LED_BLINK_ONESHOT (1 << 18)
47#define LED_BLINK_INVERT (1 << 19) 47#define LED_BLINK_ONESHOT_STOP (1 << 19)
48#define LED_BLINK_BRIGHTNESS_CHANGE (1 << 20) 48#define LED_BLINK_INVERT (1 << 20)
49#define LED_BLINK_DISABLE (1 << 21) 49#define LED_BLINK_BRIGHTNESS_CHANGE (1 << 21)
50#define LED_SYSFS_DISABLE (1 << 22) 50#define LED_BLINK_DISABLE (1 << 22)
51#define LED_DEV_CAP_FLASH (1 << 23) 51#define LED_SYSFS_DISABLE (1 << 23)
52#define LED_HW_PLUGGABLE (1 << 24) 52#define LED_DEV_CAP_FLASH (1 << 24)
53#define LED_PANIC_INDICATOR (1 << 25) 53#define LED_HW_PLUGGABLE (1 << 25)
54#define LED_PANIC_INDICATOR (1 << 26)
54 55
55 /* Set LED brightness level 56 /* Set LED brightness level
56 * Must not sleep. Use brightness_set_blocking for drivers 57 * Must not sleep. Use brightness_set_blocking for drivers
@@ -72,8 +73,8 @@ struct led_classdev {
72 * and if both are zero then a sensible default should be chosen. 73 * and if both are zero then a sensible default should be chosen.
73 * The call should adjust the timings in that case and if it can't 74 * The call should adjust the timings in that case and if it can't
74 * match the values specified exactly. 75 * match the values specified exactly.
75 * Deactivate blinking again when the brightness is set to a fixed 76 * Deactivate blinking again when the brightness is set to LED_OFF
76 * value via the brightness_set() callback. 77 * via the brightness_set() callback.
77 */ 78 */
78 int (*blink_set)(struct led_classdev *led_cdev, 79 int (*blink_set)(struct led_classdev *led_cdev,
79 unsigned long *delay_on, 80 unsigned long *delay_on,
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 035abdf62cfe..73a48479892d 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -1240,8 +1240,6 @@ struct mlx5_destroy_psv_out {
1240 u8 rsvd[8]; 1240 u8 rsvd[8];
1241}; 1241};
1242 1242
1243#define MLX5_CMD_OP_MAX 0x920
1244
1245enum { 1243enum {
1246 VPORT_STATE_DOWN = 0x0, 1244 VPORT_STATE_DOWN = 0x0,
1247 VPORT_STATE_UP = 0x1, 1245 VPORT_STATE_UP = 0x1,
@@ -1369,6 +1367,12 @@ enum mlx5_cap_type {
1369#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \ 1367#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
1370 MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap) 1368 MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)
1371 1369
1370#define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
1371 MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)
1372
1373#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
1374 MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)
1375
1372#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \ 1376#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
1373 MLX5_GET(flow_table_eswitch_cap, \ 1377 MLX5_GET(flow_table_eswitch_cap, \
1374 mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) 1378 mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 9a05cd7e5890..e955a2859009 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -205,7 +205,8 @@ enum {
205 MLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939, 205 MLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939,
206 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a, 206 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a,
207 MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b, 207 MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b,
208 MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c 208 MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c,
209 MLX5_CMD_OP_MAX
209}; 210};
210 211
211struct mlx5_ifc_flow_table_fields_supported_bits { 212struct mlx5_ifc_flow_table_fields_supported_bits {
@@ -500,7 +501,9 @@ struct mlx5_ifc_e_switch_cap_bits {
500 u8 vport_svlan_insert[0x1]; 501 u8 vport_svlan_insert[0x1];
501 u8 vport_cvlan_insert_if_not_exist[0x1]; 502 u8 vport_cvlan_insert_if_not_exist[0x1];
502 u8 vport_cvlan_insert_overwrite[0x1]; 503 u8 vport_cvlan_insert_overwrite[0x1];
503 u8 reserved_at_5[0x1b]; 504 u8 reserved_at_5[0x19];
505 u8 nic_vport_node_guid_modify[0x1];
506 u8 nic_vport_port_guid_modify[0x1];
504 507
505 u8 reserved_at_20[0x7e0]; 508 u8 reserved_at_20[0x7e0];
506}; 509};
@@ -4583,7 +4586,10 @@ struct mlx5_ifc_modify_nic_vport_context_out_bits {
4583}; 4586};
4584 4587
4585struct mlx5_ifc_modify_nic_vport_field_select_bits { 4588struct mlx5_ifc_modify_nic_vport_field_select_bits {
4586 u8 reserved_at_0[0x19]; 4589 u8 reserved_at_0[0x16];
4590 u8 node_guid[0x1];
4591 u8 port_guid[0x1];
4592 u8 reserved_at_18[0x1];
4587 u8 mtu[0x1]; 4593 u8 mtu[0x1];
4588 u8 change_event[0x1]; 4594 u8 change_event[0x1];
4589 u8 promisc[0x1]; 4595 u8 promisc[0x1];
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 64221027bf1f..ab310819ac36 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -172,6 +172,7 @@ enum {
172enum { 172enum {
173 MLX5_FENCE_MODE_NONE = 0 << 5, 173 MLX5_FENCE_MODE_NONE = 0 << 5,
174 MLX5_FENCE_MODE_INITIATOR_SMALL = 1 << 5, 174 MLX5_FENCE_MODE_INITIATOR_SMALL = 1 << 5,
175 MLX5_FENCE_MODE_FENCE = 2 << 5,
175 MLX5_FENCE_MODE_STRONG_ORDERING = 3 << 5, 176 MLX5_FENCE_MODE_STRONG_ORDERING = 3 << 5,
176 MLX5_FENCE_MODE_SMALL_AND_FENCE = 4 << 5, 177 MLX5_FENCE_MODE_SMALL_AND_FENCE = 4 << 5,
177}; 178};
@@ -460,10 +461,9 @@ struct mlx5_core_qp {
460}; 461};
461 462
462struct mlx5_qp_path { 463struct mlx5_qp_path {
463 u8 fl; 464 u8 fl_free_ar;
464 u8 rsvd3; 465 u8 rsvd3;
465 u8 free_ar; 466 __be16 pkey_index;
466 u8 pkey_index;
467 u8 rsvd0; 467 u8 rsvd0;
468 u8 grh_mlid; 468 u8 grh_mlid;
469 __be16 rlid; 469 __be16 rlid;
@@ -560,6 +560,7 @@ struct mlx5_modify_qp_mbox_in {
560 __be32 optparam; 560 __be32 optparam;
561 u8 rsvd0[4]; 561 u8 rsvd0[4];
562 struct mlx5_qp_context ctx; 562 struct mlx5_qp_context ctx;
563 u8 rsvd2[16];
563}; 564};
564 565
565struct mlx5_modify_qp_mbox_out { 566struct mlx5_modify_qp_mbox_out {
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index 301da4a5e6bf..6c16c198f680 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -50,6 +50,8 @@ int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
50int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev, 50int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
51 u64 *system_image_guid); 51 u64 *system_image_guid);
52int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid); 52int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
53int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
54 u32 vport, u64 node_guid);
53int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev, 55int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
54 u16 *qkey_viol_cntr); 56 u16 *qkey_viol_cntr);
55int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport, 57int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 5df5feb49575..ece042dfe23c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -602,7 +602,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
602} 602}
603 603
604void do_set_pte(struct vm_area_struct *vma, unsigned long address, 604void do_set_pte(struct vm_area_struct *vma, unsigned long address,
605 struct page *page, pte_t *pte, bool write, bool anon, bool old); 605 struct page *page, pte_t *pte, bool write, bool anon);
606#endif 606#endif
607 607
608/* 608/*
diff --git a/include/linux/namei.h b/include/linux/namei.h
index ec5ec2818a28..d3d0398f2a1b 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -45,6 +45,8 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
45#define LOOKUP_ROOT 0x2000 45#define LOOKUP_ROOT 0x2000
46#define LOOKUP_EMPTY 0x4000 46#define LOOKUP_EMPTY 0x4000
47 47
48extern int path_pts(struct path *path);
49
48extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty); 50extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty);
49 51
50static inline int user_path_at(int dfd, const char __user *name, unsigned flags, 52static inline int user_path_at(int dfd, const char __user *name, unsigned flags,
diff --git a/include/linux/of.h b/include/linux/of.h
index c7292e8ea080..74eb28cadbef 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -614,7 +614,7 @@ static inline struct device_node *of_parse_phandle(const struct device_node *np,
614 return NULL; 614 return NULL;
615} 615}
616 616
617static inline int of_parse_phandle_with_args(struct device_node *np, 617static inline int of_parse_phandle_with_args(const struct device_node *np,
618 const char *list_name, 618 const char *list_name,
619 const char *cells_name, 619 const char *cells_name,
620 int index, 620 int index,
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index f6e9e85164e8..b969e9443962 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -8,7 +8,7 @@ struct pci_dev;
8struct of_phandle_args; 8struct of_phandle_args;
9struct device_node; 9struct device_node;
10 10
11#ifdef CONFIG_OF 11#ifdef CONFIG_OF_PCI
12int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq); 12int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq);
13struct device_node *of_pci_find_child_device(struct device_node *parent, 13struct device_node *of_pci_find_child_device(struct device_node *parent,
14 unsigned int devfn); 14 unsigned int devfn);
diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h
index ad2f67054372..c201060e0c6d 100644
--- a/include/linux/of_reserved_mem.h
+++ b/include/linux/of_reserved_mem.h
@@ -31,6 +31,13 @@ typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem);
31int of_reserved_mem_device_init(struct device *dev); 31int of_reserved_mem_device_init(struct device *dev);
32void of_reserved_mem_device_release(struct device *dev); 32void of_reserved_mem_device_release(struct device *dev);
33 33
34int early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
35 phys_addr_t align,
36 phys_addr_t start,
37 phys_addr_t end,
38 bool nomap,
39 phys_addr_t *res_base);
40
34void fdt_init_reserved_mem(void); 41void fdt_init_reserved_mem(void);
35void fdt_reserved_mem_save_node(unsigned long node, const char *uname, 42void fdt_reserved_mem_save_node(unsigned long node, const char *uname,
36 phys_addr_t base, phys_addr_t size); 43 phys_addr_t base, phys_addr_t size);
diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h
index bf268fa92c5b..fec40271339f 100644
--- a/include/linux/page_idle.h
+++ b/include/linux/page_idle.h
@@ -46,33 +46,62 @@ extern struct page_ext_operations page_idle_ops;
46 46
47static inline bool page_is_young(struct page *page) 47static inline bool page_is_young(struct page *page)
48{ 48{
49 return test_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags); 49 struct page_ext *page_ext = lookup_page_ext(page);
50
51 if (unlikely(!page_ext))
52 return false;
53
54 return test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
50} 55}
51 56
52static inline void set_page_young(struct page *page) 57static inline void set_page_young(struct page *page)
53{ 58{
54 set_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags); 59 struct page_ext *page_ext = lookup_page_ext(page);
60
61 if (unlikely(!page_ext))
62 return;
63
64 set_bit(PAGE_EXT_YOUNG, &page_ext->flags);
55} 65}
56 66
57static inline bool test_and_clear_page_young(struct page *page) 67static inline bool test_and_clear_page_young(struct page *page)
58{ 68{
59 return test_and_clear_bit(PAGE_EXT_YOUNG, 69 struct page_ext *page_ext = lookup_page_ext(page);
60 &lookup_page_ext(page)->flags); 70
71 if (unlikely(!page_ext))
72 return false;
73
74 return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
61} 75}
62 76
63static inline bool page_is_idle(struct page *page) 77static inline bool page_is_idle(struct page *page)
64{ 78{
65 return test_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags); 79 struct page_ext *page_ext = lookup_page_ext(page);
80
81 if (unlikely(!page_ext))
82 return false;
83
84 return test_bit(PAGE_EXT_IDLE, &page_ext->flags);
66} 85}
67 86
68static inline void set_page_idle(struct page *page) 87static inline void set_page_idle(struct page *page)
69{ 88{
70 set_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags); 89 struct page_ext *page_ext = lookup_page_ext(page);
90
91 if (unlikely(!page_ext))
92 return;
93
94 set_bit(PAGE_EXT_IDLE, &page_ext->flags);
71} 95}
72 96
73static inline void clear_page_idle(struct page *page) 97static inline void clear_page_idle(struct page *page)
74{ 98{
75 clear_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags); 99 struct page_ext *page_ext = lookup_page_ext(page);
100
101 if (unlikely(!page_ext))
102 return;
103
104 clear_bit(PAGE_EXT_IDLE, &page_ext->flags);
76} 105}
77#endif /* CONFIG_64BIT */ 106#endif /* CONFIG_64BIT */
78 107
diff --git a/include/linux/platform_data/omapdss.h b/include/linux/platform_data/omapdss.h
new file mode 100644
index 000000000000..679177929045
--- /dev/null
+++ b/include/linux/platform_data/omapdss.h
@@ -0,0 +1,37 @@
1/*
2 * Copyright (C) 2016 Texas Instruments, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#ifndef __OMAPDSS_PDATA_H
11#define __OMAPDSS_PDATA_H
12
13enum omapdss_version {
14 OMAPDSS_VER_UNKNOWN = 0,
15 OMAPDSS_VER_OMAP24xx,
16 OMAPDSS_VER_OMAP34xx_ES1, /* OMAP3430 ES1.0, 2.0 */
17 OMAPDSS_VER_OMAP34xx_ES3, /* OMAP3430 ES3.0+ */
18 OMAPDSS_VER_OMAP3630,
19 OMAPDSS_VER_AM35xx,
20 OMAPDSS_VER_OMAP4430_ES1, /* OMAP4430 ES1.0 */
21 OMAPDSS_VER_OMAP4430_ES2, /* OMAP4430 ES2.0, 2.1, 2.2 */
22 OMAPDSS_VER_OMAP4, /* All other OMAP4s */
23 OMAPDSS_VER_OMAP5,
24 OMAPDSS_VER_AM43xx,
25 OMAPDSS_VER_DRA7xx,
26};
27
28/* Board-specific data */
29struct omap_dss_board_info {
30 const char *default_display_name;
31 int (*dsi_enable_pads)(int dsi_id, unsigned int lane_mask);
32 void (*dsi_disable_pads)(int dsi_id, unsigned int lane_mask);
33 int (*set_min_bus_tput)(struct device *dev, unsigned long r);
34 enum omapdss_version version;
35};
36
37#endif /* __OMAPDSS_PDATA_H */
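
A hypothetical board-file sketch for the new platform-data header; omap_display_init() is assumed to be the arch-side consumer, and the panel name is an assumption:

    static struct omap_dss_board_info board_dss_data = {
            .default_display_name = "lcd",
    };

    /* in the board init code: */
    omap_display_init(&board_dss_data);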
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 17018f3c066e..908b67c847cd 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -235,6 +235,9 @@ static inline int pwm_config(struct pwm_device *pwm, int duty_ns,
235 if (!pwm) 235 if (!pwm)
236 return -EINVAL; 236 return -EINVAL;
237 237
238 if (duty_ns < 0 || period_ns < 0)
239 return -EINVAL;
240
238 pwm_get_state(pwm, &state); 241 pwm_get_state(pwm, &state);
239 if (state.duty_cycle == duty_ns && state.period == period_ns) 242 if (state.duty_cycle == duty_ns && state.period == period_ns)
240 return 0; 243 return 0;
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index 49d057655d62..b0f305e77b7f 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -49,12 +49,27 @@ extern struct ww_class reservation_ww_class;
49extern struct lock_class_key reservation_seqcount_class; 49extern struct lock_class_key reservation_seqcount_class;
50extern const char reservation_seqcount_string[]; 50extern const char reservation_seqcount_string[];
51 51
52/**
53 * struct reservation_object_list - a list of shared fences
54 * @rcu: for internal use
 55 * @shared_count: number of fences in the shared table
 56 * @shared_max: allocated capacity of the shared fence table
57 * @shared: shared fence table
58 */
52struct reservation_object_list { 59struct reservation_object_list {
53 struct rcu_head rcu; 60 struct rcu_head rcu;
54 u32 shared_count, shared_max; 61 u32 shared_count, shared_max;
55 struct fence __rcu *shared[]; 62 struct fence __rcu *shared[];
56}; 63};
57 64
65/**
66 * struct reservation_object - a reservation object manages fences for a buffer
67 * @lock: update side lock
68 * @seq: sequence count for managing RCU read-side synchronization
69 * @fence_excl: the exclusive fence, if there is one currently
70 * @fence: list of current shared fences
71 * @staged: staged copy of shared fences for RCU updates
72 */
58struct reservation_object { 73struct reservation_object {
59 struct ww_mutex lock; 74 struct ww_mutex lock;
60 seqcount_t seq; 75 seqcount_t seq;
@@ -68,6 +83,10 @@ struct reservation_object {
68#define reservation_object_assert_held(obj) \ 83#define reservation_object_assert_held(obj) \
69 lockdep_assert_held(&(obj)->lock.base) 84 lockdep_assert_held(&(obj)->lock.base)
70 85
86/**
87 * reservation_object_init - initialize a reservation object
88 * @obj: the reservation object
89 */
71static inline void 90static inline void
72reservation_object_init(struct reservation_object *obj) 91reservation_object_init(struct reservation_object *obj)
73{ 92{
@@ -79,6 +98,10 @@ reservation_object_init(struct reservation_object *obj)
79 obj->staged = NULL; 98 obj->staged = NULL;
80} 99}
81 100
101/**
102 * reservation_object_fini - destroys a reservation object
103 * @obj: the reservation object
104 */
82static inline void 105static inline void
83reservation_object_fini(struct reservation_object *obj) 106reservation_object_fini(struct reservation_object *obj)
84{ 107{
@@ -106,6 +129,14 @@ reservation_object_fini(struct reservation_object *obj)
106 ww_mutex_destroy(&obj->lock); 129 ww_mutex_destroy(&obj->lock);
107} 130}
108 131
132/**
133 * reservation_object_get_list - get the reservation object's
134 * shared fence list, with update-side lock held
135 * @obj: the reservation object
136 *
137 * Returns the shared fence list. Does NOT take references to
138 * the fences. The obj->lock must be held.
139 */
109static inline struct reservation_object_list * 140static inline struct reservation_object_list *
110reservation_object_get_list(struct reservation_object *obj) 141reservation_object_get_list(struct reservation_object *obj)
111{ 142{
@@ -113,6 +144,17 @@ reservation_object_get_list(struct reservation_object *obj)
113 reservation_object_held(obj)); 144 reservation_object_held(obj));
114} 145}
115 146
147/**
148 * reservation_object_get_excl - get the reservation object's
149 * exclusive fence, with update-side lock held
150 * @obj: the reservation object
151 *
152 * Returns the exclusive fence (if any). Does NOT take a
153 * reference. The obj->lock must be held.
154 *
155 * RETURNS
156 * The exclusive fence or NULL
157 */
116static inline struct fence * 158static inline struct fence *
117reservation_object_get_excl(struct reservation_object *obj) 159reservation_object_get_excl(struct reservation_object *obj)
118{ 160{
@@ -120,6 +162,17 @@ reservation_object_get_excl(struct reservation_object *obj)
120 reservation_object_held(obj)); 162 reservation_object_held(obj));
121} 163}
122 164
165/**
166 * reservation_object_get_excl_rcu - get the reservation object's
167 * exclusive fence, without the lock held.
168 * @obj: the reservation object
169 *
170 * If there is an exclusive fence, this atomically increments its
171 * reference count and returns it.
172 *
173 * RETURNS
174 * The exclusive fence or NULL if none
175 */
123static inline struct fence * 176static inline struct fence *
124reservation_object_get_excl_rcu(struct reservation_object *obj) 177reservation_object_get_excl_rcu(struct reservation_object *obj)
125{ 178{
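
A sketch of the lockless accessor: the helper takes the fence reference internally, so the caller needs neither obj->lock nor rcu_read_lock(), only a fence_put() on the result. The obj->resv field is an assumed driver member:

    struct fence *excl = reservation_object_get_excl_rcu(obj->resv);

    if (excl) {
            fence_wait(excl, false);    /* e.g. before CPU access */
            fence_put(excl);            /* drop the acquired reference */
    }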
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6e42ada26345..253538f29ade 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -3007,7 +3007,7 @@ static inline int object_is_on_stack(void *obj)
3007 return (obj >= stack) && (obj < (stack + THREAD_SIZE)); 3007 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
3008} 3008}
3009 3009
3010extern void thread_info_cache_init(void); 3010extern void thread_stack_cache_init(void);
3011 3011
3012#ifdef CONFIG_DEBUG_STACK_USAGE 3012#ifdef CONFIG_DEBUG_STACK_USAGE
3013static inline unsigned long stack_not_used(struct task_struct *p) 3013static inline unsigned long stack_not_used(struct task_struct *p)
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index dacb5e711994..de1f64318fc4 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -765,6 +765,8 @@ struct sctp_info {
765 __u8 sctpi_s_disable_fragments; 765 __u8 sctpi_s_disable_fragments;
766 __u8 sctpi_s_v4mapped; 766 __u8 sctpi_s_v4mapped;
767 __u8 sctpi_s_frag_interleave; 767 __u8 sctpi_s_frag_interleave;
768 __u32 sctpi_s_type;
769 __u32 __reserved3;
768}; 770};
769 771
770struct sctp_infox { 772struct sctp_infox {
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 7973a821ac58..ead97654c4e9 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -277,7 +277,10 @@ static inline void raw_write_seqcount_barrier(seqcount_t *s)
277 277
278static inline int raw_read_seqcount_latch(seqcount_t *s) 278static inline int raw_read_seqcount_latch(seqcount_t *s)
279{ 279{
280 return lockless_dereference(s)->sequence; 280 int seq = READ_ONCE(s->sequence);
281 /* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */
282 smp_read_barrier_depends();
283 return seq;
281} 284}
282 285
283/** 286/**
@@ -331,7 +334,7 @@ static inline int raw_read_seqcount_latch(seqcount_t *s)
331 * unsigned seq, idx; 334 * unsigned seq, idx;
332 * 335 *
333 * do { 336 * do {
334 * seq = lockless_dereference(latch)->seq; 337 * seq = raw_read_seqcount_latch(&latch->seq);
335 * 338 *
336 * idx = seq & 0x01; 339 * idx = seq & 0x01;
337 * entry = data_query(latch->data[idx], ...); 340 * entry = data_query(latch->data[idx], ...);
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 19c659d1c0f8..b6810c92b8bb 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -137,8 +137,6 @@ struct rpc_create_args {
137#define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT (1UL << 9) 137#define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT (1UL << 9)
138 138
139struct rpc_clnt *rpc_create(struct rpc_create_args *args); 139struct rpc_clnt *rpc_create(struct rpc_create_args *args);
140struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
141 struct rpc_xprt *xprt);
142struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *, 140struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *,
143 const struct rpc_program *, u32); 141 const struct rpc_program *, u32);
144struct rpc_clnt *rpc_clone_client(struct rpc_clnt *); 142struct rpc_clnt *rpc_clone_client(struct rpc_clnt *);
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index b7dabc4baafd..79ba50856707 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -84,6 +84,7 @@ struct svc_xprt {
84 84
85 struct net *xpt_net; 85 struct net *xpt_net;
86 struct rpc_xprt *xpt_bc_xprt; /* NFSv4.1 backchannel */ 86 struct rpc_xprt *xpt_bc_xprt; /* NFSv4.1 backchannel */
87 struct rpc_xprt_switch *xpt_bc_xps; /* NFSv4.1 backchannel */
87}; 88};
88 89
89static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u) 90static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 5aa3834619a8..5e3e1b63dbb3 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -297,6 +297,7 @@ struct xprt_create {
297 size_t addrlen; 297 size_t addrlen;
298 const char *servername; 298 const char *servername;
299 struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ 299 struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */
300 struct rpc_xprt_switch *bc_xps;
300 unsigned int flags; 301 unsigned int flags;
301}; 302};
302 303
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index e45abe7db9a6..ee517bef0db0 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -335,6 +335,8 @@ struct thermal_genl_event {
335 * @get_trend: a pointer to a function that reads the sensor temperature trend. 335 * @get_trend: a pointer to a function that reads the sensor temperature trend.
336 * @set_emul_temp: a pointer to a function that sets sensor emulated 336 * @set_emul_temp: a pointer to a function that sets sensor emulated
337 * temperature. 337 * temperature.
338 * @set_trip_temp: a pointer to a function that sets the trip temperature on
339 * hardware.
338 */ 340 */
339struct thermal_zone_of_device_ops { 341struct thermal_zone_of_device_ops {
340 int (*get_temp)(void *, int *); 342 int (*get_temp)(void *, int *);
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 37dbacf84849..816b7543f81b 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -21,6 +21,9 @@ static inline int do_sys_settimeofday(const struct timespec *tv,
21 struct timespec64 ts64; 21 struct timespec64 ts64;
22 22
23 if (!tv) 23 if (!tv)
24 return do_sys_settimeofday64(NULL, tz);
25
26 if (!timespec_valid(tv))
24 return -EINVAL; 27 return -EINVAL;
25 28
26 ts64 = timespec_to_timespec64(*tv); 29 ts64 = timespec_to_timespec64(*tv);
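
With the NULL pass-through, a timezone-only update now reaches do_sys_settimeofday64() instead of being rejected; a sketch:

    struct timezone tz = { .tz_minuteswest = -60, .tz_dsttime = 0 };

    ret = do_sys_settimeofday(NULL, &tz);   /* tv == NULL: set tz only */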
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 457651bf45b0..fefe8b06a63d 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -1034,6 +1034,8 @@ static inline int usb_gadget_activate(struct usb_gadget *gadget)
  * @udc_name: A name of UDC this driver should be bound to. If udc_name is NULL,
  *	this driver will be bound to any available UDC.
  * @pending: UDC core private data used for deferred probe of this driver.
+ * @match_existing_only: If udc is not found, return an error and don't add this
+ *	gadget driver to the list of pending drivers.
  *
  * Devices are disabled till a gadget driver successfully bind()s, which
  * means the driver will handle setup() requests needed to enumerate (and
@@ -1097,6 +1099,7 @@ struct usb_gadget_driver {
 
 	char			*udc_name;
 	struct list_head	pending;
+	unsigned		match_existing_only:1;
 };
 
 
diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h
index 0b3da40a525e..d315c8907869 100644
--- a/include/linux/usb/musb.h
+++ b/include/linux/usb/musb.h
@@ -142,10 +142,11 @@ enum musb_vbus_id_status {
 };
 
 #if IS_ENABLED(CONFIG_USB_MUSB_HDRC)
-void musb_mailbox(enum musb_vbus_id_status status);
+int musb_mailbox(enum musb_vbus_id_status status);
 #else
-static inline void musb_mailbox(enum musb_vbus_id_status status)
+static inline int musb_mailbox(enum musb_vbus_id_status status)
 {
+	return 0;
 }
 #endif
 
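
With musb_mailbox() now returning int, callers can propagate failures instead of silently losing them. A hedged sketch of a caller; the function name and message are invented for illustration, only musb_mailbox() itself is from the patch:

	#include <linux/printk.h>
	#include <linux/usb/musb.h>

	static int example_notify_vbus(enum musb_vbus_id_status status)
	{
		int err;

		err = musb_mailbox(status);	/* stub returns 0 when !CONFIG_USB_MUSB_HDRC */
		if (err)
			pr_warn("example: musb_mailbox failed: %d\n", err);
		return err;
	}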
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
index b39a5f3153bd..960bedbdec87 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
@@ -165,6 +165,7 @@ int vga_switcheroo_unlock_ddc(struct pci_dev *pdev);
 
 int vga_switcheroo_process_delayed_switch(void);
 
+bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev);
 enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev);
 
 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
@@ -188,6 +189,7 @@ static inline enum vga_switcheroo_handler_flags_t vga_switcheroo_handler_flags(v
 static inline int vga_switcheroo_lock_ddc(struct pci_dev *pdev) { return -ENODEV; }
 static inline int vga_switcheroo_unlock_ddc(struct pci_dev *pdev) { return -ENODEV; }
 static inline int vga_switcheroo_process_delayed_switch(void) { return 0; }
+static inline bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev) { return false; }
 static inline enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; }
 
 static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
diff --git a/include/media/v4l2-mc.h b/include/media/v4l2-mc.h
index 98a938aabdfb..7a8d6037a4bb 100644
--- a/include/media/v4l2-mc.h
+++ b/include/media/v4l2-mc.h
@@ -1,7 +1,7 @@
 /*
  * v4l2-mc.h - Media Controller V4L2 types and prototypes
  *
- * Copyright (C) 2016 Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+ * Copyright (C) 2016 Mauro Carvalho Chehab <mchehab@kernel.org>
  * Copyright (C) 2006-2010 Nokia Corporation
  * Copyright (c) 2016 Intel Corporation.
  *
diff --git a/include/net/compat.h b/include/net/compat.h
index 48103cf94e97..13de0ccaa059 100644
--- a/include/net/compat.h
+++ b/include/net/compat.h
@@ -42,6 +42,7 @@ int compat_sock_get_timestampns(struct sock *, struct timespec __user *);
 
 int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *,
 		      struct sockaddr __user **, struct iovec **);
+struct sock_fprog __user *get_compat_bpf_fprog(char __user *optval);
 asmlinkage long compat_sys_sendmsg(int, struct compat_msghdr __user *,
 				   unsigned int);
 asmlinkage long compat_sys_sendmmsg(int, struct compat_mmsghdr __user *,
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index d325c81332e3..43a5a0e4524c 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -63,6 +63,8 @@ struct ip6_tnl_encap_ops {
 			    u8 *protocol, struct flowi6 *fl6);
 };
 
+#ifdef CONFIG_INET
+
 extern const struct ip6_tnl_encap_ops __rcu *
 		ip6tun_encaps[MAX_IPTUN_ENCAP_OPS];
 
@@ -138,7 +140,6 @@ struct net *ip6_tnl_get_link_net(const struct net_device *dev);
 int ip6_tnl_get_iflink(const struct net_device *dev);
 int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu);
 
-#ifdef CONFIG_INET
 static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
 				  struct net_device *dev)
 {
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index af4c10ebb241..cd6018a9ee24 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -1232,7 +1232,7 @@ void ip_vs_conn_expire_now(struct ip_vs_conn *cp);
 const char *ip_vs_state_name(__u16 proto, int state);
 
 void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp);
-int ip_vs_check_template(struct ip_vs_conn *ct);
+int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest);
 void ip_vs_random_dropentry(struct netns_ipvs *ipvs);
 int ip_vs_conn_init(void);
 void ip_vs_conn_cleanup(void);
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index 9c5638ad872e..0dbce55437f2 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -28,8 +28,8 @@ struct nf_queue_handler {
 				    struct nf_hook_ops *ops);
 };
 
-void nf_register_queue_handler(const struct nf_queue_handler *qh);
-void nf_unregister_queue_handler(void);
+void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh);
+void nf_unregister_queue_handler(struct net *net);
 void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
 
 void nf_queue_entry_get_refs(struct nf_queue_entry *entry);
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
index 38aa4983e2a9..36d723579af2 100644
--- a/include/net/netns/netfilter.h
+++ b/include/net/netns/netfilter.h
@@ -5,11 +5,13 @@
 
 struct proc_dir_entry;
 struct nf_logger;
+struct nf_queue_handler;
 
 struct netns_nf {
 #if defined CONFIG_PROC_FS
 	struct proc_dir_entry *proc_netfilter;
 #endif
+	const struct nf_queue_handler __rcu *queue_handler;
 	const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO];
 #ifdef CONFIG_SYSCTL
 	struct ctl_table_header *nf_log_dir_header;
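
The queue handler moving into struct netns_nf means registration is now per network namespace. A sketch of how a pernet subsystem might use the new signatures; the ops and names here are invented, only nf_register_queue_handler()/nf_unregister_queue_handler() are from the patch:

	static int example_outfn(struct nf_queue_entry *entry, unsigned int queuenum)
	{
		return -ENOSPC;			/* no listener in this sketch */
	}

	static const struct nf_queue_handler example_qh = {
		.outfn	= example_outfn,
	};

	static int __net_init example_net_init(struct net *net)
	{
		nf_register_queue_handler(net, &example_qh);
		return 0;
	}

	static void __net_exit example_net_exit(struct net *net)
	{
		nf_unregister_queue_handler(net);
	}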
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 0f7efa88f210..3722dda0199d 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -392,16 +392,20 @@ struct tc_cls_u32_offload {
 	};
 };
 
-static inline bool tc_should_offload(struct net_device *dev, u32 flags)
+static inline bool tc_should_offload(const struct net_device *dev,
+				     const struct tcf_proto *tp, u32 flags)
 {
+	const struct Qdisc *sch = tp->q;
+	const struct Qdisc_class_ops *cops = sch->ops->cl_ops;
+
 	if (!(dev->features & NETIF_F_HW_TC))
 		return false;
-
 	if (flags & TCA_CLS_FLAGS_SKIP_HW)
 		return false;
-
 	if (!dev->netdev_ops->ndo_setup_tc)
 		return false;
+	if (cops && cops->tcf_cl_offload)
+		return cops->tcf_cl_offload(tp->classid);
 
 	return true;
 }
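
The new tcf_cl_offload hook lets a classful qdisc veto hardware offload per class. A hedged sketch of a qdisc wiring it up; the callback body and names are invented for illustration:

	static bool example_tcf_cl_offload(u32 classid)
	{
		/* e.g. only offload filters attached to the root class */
		return TC_H_MIN(classid) == 0;
	}

	static const struct Qdisc_class_ops example_class_ops = {
		/* ...tcf_chain, bind_tcf, unbind_tcf, etc... */
		.tcf_cl_offload	= example_tcf_cl_offload,
	};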
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 401038d2f9b8..fea53f4d92ca 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -61,6 +61,7 @@ psched_tdiff_bounded(psched_time_t tv1, psched_time_t tv2, psched_time_t bound)
 }
 
 struct qdisc_watchdog {
+	u64		last_expires;
 	struct hrtimer	timer;
 	struct Qdisc	*qdisc;
 };
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index a1fd76c22a59..62d553184e91 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -168,6 +168,7 @@ struct Qdisc_class_ops {
 
 	/* Filter manipulation */
 	struct tcf_proto __rcu **	(*tcf_chain)(struct Qdisc *, unsigned long);
+	bool			(*tcf_cl_offload)(u32 classid);
 	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
 					u32 classid);
 	void			(*unbind_tcf)(struct Qdisc *, unsigned long);
@@ -691,9 +692,11 @@ static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
 	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
 	if (!sch->gso_skb) {
 		sch->gso_skb = sch->dequeue(sch);
-		if (sch->gso_skb)
+		if (sch->gso_skb) {
 			/* it's still part of the queue */
+			qdisc_qstats_backlog_inc(sch, sch->gso_skb);
 			sch->q.qlen++;
+		}
 	}
 
 	return sch->gso_skb;
@@ -706,6 +709,7 @@ static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
 
 	if (skb) {
 		sch->gso_skb = NULL;
+		qdisc_qstats_backlog_dec(sch, skb);
 		sch->q.qlen--;
 	} else {
 		skb = sch->dequeue(sch);
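
The two hunks above keep qlen and backlog in step for the skb parked in sch->gso_skb by peek. The invariant they enforce, reduced to a pair of hypothetical helpers (illustration only, not kernel code paths):

	static void park_peeked_skb(struct Qdisc *sch, struct sk_buff *skb)
	{
		sch->gso_skb = skb;
		qdisc_qstats_backlog_inc(sch, skb);	/* bytes stay accounted */
		sch->q.qlen++;				/* and so do packets */
	}

	static struct sk_buff *unpark_peeked_skb(struct Qdisc *sch)
	{
		struct sk_buff *skb = sch->gso_skb;

		sch->gso_skb = NULL;
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
		return skb;
	}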
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 432bed510369..7e440d41487a 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -217,10 +217,10 @@ enum ib_device_cap_flags {
 	IB_DEVICE_CROSS_CHANNEL		= (1 << 27),
 	IB_DEVICE_MANAGED_FLOW_STEERING	= (1 << 29),
 	IB_DEVICE_SIGNATURE_HANDOVER	= (1 << 30),
-	IB_DEVICE_ON_DEMAND_PAGING	= (1 << 31),
+	IB_DEVICE_ON_DEMAND_PAGING	= (1ULL << 31),
 	IB_DEVICE_SG_GAPS_REG		= (1ULL << 32),
-	IB_DEVICE_VIRTUAL_FUNCTION	= ((u64)1 << 33),
-	IB_DEVICE_RAW_SCATTER_FCS	= ((u64)1 << 34),
+	IB_DEVICE_VIRTUAL_FUNCTION	= (1ULL << 33),
+	IB_DEVICE_RAW_SCATTER_FCS	= (1ULL << 34),
 };
 
 enum ib_signature_prot_cap {
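
The ULL suffix matters because these values end up in a 64-bit capability mask: (1 << 31) is evaluated as a signed 32-bit int, overflows, and on typical targets sign-extends when widened. A standalone sketch of the failure mode, independent of the patch:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long flags;

		flags = (1 << 31);	/* signed overflow: widens to 0xffffffff80000000 */
		printf("bad:  %llx\n", flags);

		flags = (1ULL << 31);	/* well-defined: 0x80000000 */
		printf("good: %llx\n", flags);
		return 0;
	}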
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index 16274e2133cd..9c9a27d42aaa 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -203,7 +203,9 @@ struct rvt_driver_provided {
 
 	/*
 	 * Allocate a private queue pair data structure for driver specific
-	 * information which is opaque to rdmavt.
+	 * information which is opaque to rdmavt. Errors are returned via
+	 * ERR_PTR(err). The driver is free to return NULL or a valid
+	 * pointer.
 	 */
 	void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
 				gfp_t gfp);
diff --git a/include/sound/omap-hdmi-audio.h b/include/sound/omap-hdmi-audio.h
index afdb416898e0..1df2ff61a4dd 100644
--- a/include/sound/omap-hdmi-audio.h
+++ b/include/sound/omap-hdmi-audio.h
@@ -16,11 +16,16 @@
  *
  */
 
-#include <video/omapdss.h>
-
 #ifndef __OMAP_HDMI_AUDIO_H__
 #define __OMAP_HDMI_AUDIO_H__
 
+#include <linux/platform_data/omapdss.h>
+
+struct omap_dss_audio {
+	struct snd_aes_iec958 *iec;
+	struct snd_cea_861_aud_if *cea;
+};
+
 struct omap_hdmi_audio_ops {
 	int (*audio_startup)(struct device *dev,
 			     void (*abort_cb)(struct device *dev));
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index 23c6960e94a4..2bdd1e3e7007 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -118,7 +118,7 @@ struct btrfs_ioctl_vol_args_v2 {
 	};
 	union {
 		char name[BTRFS_SUBVOL_NAME_MAX + 1];
-		u64 devid;
+		__u64 devid;
 	};
 };
 
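
UAPI headers are also compiled by user space, where the kernel-internal u64 typedef does not exist; only the double-underscore fixed-width types from <linux/types.h> are exported. A minimal sketch of the rule:

	#include <linux/types.h>	/* exported to user space: __u64, __u32, ... */

	struct example_ioctl_args {
		__u64 devid;	/* plain u64 would not compile outside the kernel */
	};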
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 9222db8ccccc..5f030b46cff4 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1353,6 +1353,15 @@ enum ethtool_link_mode_bit_indices {
 	ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT	= 28,
 	ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT	= 29,
 	ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT	= 30,
+	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT	= 31,
+	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT	= 32,
+	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT	= 33,
+	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT	= 34,
+	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT	= 35,
+	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT	= 36,
+	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT	= 37,
+	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT	= 38,
+	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT	= 39,
 
 	/* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit
 	 * 31. Please do NOT define any SUPPORTED_* or ADVERTISED_*
@@ -1361,7 +1370,7 @@ enum ethtool_link_mode_bit_indices {
 	 */
 
 	__ETHTOOL_LINK_MODE_LAST
-	  = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
+	  = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
 };
 
 #define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name)	\
diff --git a/include/uapi/linux/gtp.h b/include/uapi/linux/gtp.h
index ca1054dd8249..72a04a0e8cce 100644
--- a/include/uapi/linux/gtp.h
+++ b/include/uapi/linux/gtp.h
@@ -1,5 +1,5 @@
 #ifndef _UAPI_LINUX_GTP_H_
-#define _UAPI_LINUX_GTP_H__
+#define _UAPI_LINUX_GTP_H_
 
 enum gtp_genl_cmds {
 	GTP_CMD_NEWPDP,
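
The guard typo fixed above is the classic pattern where #ifndef tests one macro and #define creates a different one, so the guard never engages and a double include re-declares everything. Reduced example:

	#ifndef _EXAMPLE_H_
	#define _EXAMPLE_H__	/* typo: extra underscore, guard never trips */

	struct example {
		int x;		/* re-declared on a second #include -> build error */
	};

	#endif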
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index eba5914ba5d1..f4297c8a42fe 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -145,6 +145,8 @@ enum {
 	TCA_POLICE_PEAKRATE,
 	TCA_POLICE_AVRATE,
 	TCA_POLICE_RESULT,
+	TCA_POLICE_TM,
+	TCA_POLICE_PAD,
 	__TCA_POLICE_MAX
 #define TCA_POLICE_RESULT TCA_POLICE_RESULT
 };
@@ -173,7 +175,7 @@ enum {
 	TCA_U32_DIVISOR,
 	TCA_U32_SEL,
 	TCA_U32_POLICE,
 	TCA_U32_ACT,
 	TCA_U32_INDEV,
 	TCA_U32_PCNT,
 	TCA_U32_MARK,
diff --git a/include/uapi/sound/Kbuild b/include/uapi/sound/Kbuild
index a7f27704f980..691984cb0b91 100644
--- a/include/uapi/sound/Kbuild
+++ b/include/uapi/sound/Kbuild
@@ -1,5 +1,6 @@
 # UAPI Header export list
 header-y += asequencer.h
+header-y += asoc.h
 header-y += asound.h
 header-y += asound_fm.h
 header-y += compress_offload.h
@@ -10,3 +11,5 @@ header-y += hdsp.h
 header-y += hdspm.h
 header-y += sb16_csp.h
 header-y += sfnt_info.h
+header-y += tlv.h
+header-y += usb_stream.h
diff --git a/include/video/omap-panel-data.h b/include/video/omap-panel-data.h
index 56830d1dc762..e7003ee6e063 100644
--- a/include/video/omap-panel-data.h
+++ b/include/video/omap-panel-data.h
@@ -27,59 +27,18 @@
 #ifndef __OMAP_PANEL_DATA_H
 #define __OMAP_PANEL_DATA_H
 
-#include <video/omapdss.h>
 #include <video/display_timing.h>
 
-struct omap_dss_device;
-
-/**
- * encoder_tfp410 platform data
- * @name: name for this display entity
- * @power_down_gpio: gpio number for PD pin (or -1 if not available)
- * @data_lines: number of DPI datalines
- */
-struct encoder_tfp410_platform_data {
-	const char *name;
-	const char *source;
-	int power_down_gpio;
-	int data_lines;
-};
-
-
-/**
- * connector_dvi platform data
- * @name: name for this display entity
- * @source: name of the display entity used as a video source
- * @i2c_bus_num: i2c bus number to be used for reading EDID
- */
-struct connector_dvi_platform_data {
-	const char *name;
-	const char *source;
-	int i2c_bus_num;
-};
-
-/**
- * connector_hdmi platform data
- * @name: name for this display entity
- * @source: name of the display entity used as a video source
- */
-struct connector_hdmi_platform_data {
-	const char *name;
-	const char *source;
-};
-
 /**
  * connector_atv platform data
  * @name: name for this display entity
  * @source: name of the display entity used as a video source
- * @connector_type: composite/svideo
  * @invert_polarity: invert signal polarity
  */
 struct connector_atv_platform_data {
 	const char *name;
 	const char *source;
 
-	enum omap_dss_venc_type connector_type;
 	bool invert_polarity;
 };
 
@@ -105,33 +64,6 @@ struct panel_dpi_platform_data {
 };
 
 /**
- * panel_dsicm platform data
- * @name: name for this display entity
- * @source: name of the display entity used as a video source
- * @reset_gpio: gpio to reset the panel (or -1)
- * @use_ext_te: use external TE GPIO
- * @ext_te_gpio: external TE GPIO
- * @ulps_timeout: time to wait before entering ULPS, 0 = disabled (ms)
- * @use_dsi_backlight: true if panel uses DSI command to control backlight
- * @pin_config: DSI pin configuration
- */
-struct panel_dsicm_platform_data {
-	const char *name;
-	const char *source;
-
-	int reset_gpio;
-
-	bool use_ext_te;
-	int ext_te_gpio;
-
-	unsigned ulps_timeout;
-
-	bool use_dsi_backlight;
-
-	struct omap_dsi_pin_config pin_config;
-};
-
-/**
  * panel_acx565akm platform data
  * @name: name for this display entity
  * @source: name of the display entity used as a video source
@@ -147,93 +79,4 @@ struct panel_acx565akm_platform_data {
 	int datapairs;
 };
 
-/**
- * panel_lb035q02 platform data
- * @name: name for this display entity
- * @source: name of the display entity used as a video source
- * @data_lines: number of DPI datalines
- * @backlight_gpio: gpio to enable/disable the backlight (or -1)
- * @enable_gpio: gpio to enable/disable the panel (or -1)
- */
-struct panel_lb035q02_platform_data {
-	const char *name;
-	const char *source;
-
-	int data_lines;
-
-	int backlight_gpio;
-	int enable_gpio;
-};
-
-/**
- * panel_sharp_ls037v7dw01 platform data
- * @name: name for this display entity
- * @source: name of the display entity used as a video source
- * @data_lines: number of DPI datalines
- * @resb_gpio: reset signal GPIO
- * @ini_gpio: power on control GPIO
- * @mo_gpio: selection for resolution(VGA/QVGA) GPIO
- * @lr_gpio: selection for horizontal scanning direction GPIO
- * @ud_gpio: selection for vertical scanning direction GPIO
- */
-struct panel_sharp_ls037v7dw01_platform_data {
-	const char *name;
-	const char *source;
-
-	int data_lines;
-
-	int resb_gpio;
-	int ini_gpio;
-	int mo_gpio;
-	int lr_gpio;
-	int ud_gpio;
-};
-
-/**
- * panel-tpo-td043mtea1 platform data
- * @name: name for this display entity
- * @source: name of the display entity used as a video source
- * @data_lines: number of DPI datalines
- * @nreset_gpio: reset signal
- */
-struct panel_tpo_td043mtea1_platform_data {
-	const char *name;
-	const char *source;
-
-	int data_lines;
-
-	int nreset_gpio;
-};
-
-/**
- * panel-nec-nl8048hl11 platform data
- * @name: name for this display entity
- * @source: name of the display entity used as a video source
- * @data_lines: number of DPI datalines
- * @res_gpio: reset signal
- * @qvga_gpio: selection for resolution(QVGA/WVGA)
- */
-struct panel_nec_nl8048hl11_platform_data {
-	const char *name;
-	const char *source;
-
-	int data_lines;
-
-	int res_gpio;
-	int qvga_gpio;
-};
-
-/**
- * panel-tpo-td028ttec1 platform data
- * @name: name for display entity
- * @source: name of the display entity used as a video source
- * @data_lines: number of DPI datalines
- */
-struct panel_tpo_td028ttec1_platform_data {
-	const char *name;
-	const char *source;
-
-	int data_lines;
-};
-
 #endif /* __OMAP_PANEL_DATA_H */
diff --git a/include/video/omapdss.h b/include/video/omapfb_dss.h
index 8e14ad7327c9..1d38901d599d 100644
--- a/include/video/omapdss.h
+++ b/include/video/omapfb_dss.h
@@ -1,27 +1,20 @@
 /*
- * Copyright (C) 2008 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Copyright (C) 2016 Texas Instruments, Inc.
  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
  */
 
-#ifndef __OMAP_OMAPDSS_H
-#define __OMAP_OMAPDSS_H
+#ifndef __OMAPFB_DSS_H
+#define __OMAPFB_DSS_H
 
 #include <linux/list.h>
 #include <linux/kobject.h>
 #include <linux/device.h>
 #include <linux/interrupt.h>
+#include <linux/platform_data/omapdss.h>
 
 #include <video/videomode.h>
 
@@ -167,11 +160,6 @@ enum omap_dss_display_state {
 	OMAP_DSS_DISPLAY_ACTIVE,
 };
 
-struct omap_dss_audio {
-	struct snd_aes_iec958 *iec;
-	struct snd_cea_861_aud_if *cea;
-};
-
 enum omap_dss_rotation_type {
 	OMAP_DSS_ROT_DMA	= 1 << 0,
 	OMAP_DSS_ROT_VRFB	= 1 << 1,
@@ -195,25 +183,6 @@ enum omap_overlay_caps {
 	OMAP_DSS_OVL_CAP_REPLICATION	= 1 << 5,
 };
 
-enum omap_overlay_manager_caps {
-	OMAP_DSS_DUMMY_VALUE, /* add a dummy value to prevent compiler error */
-};
-
-enum omap_dss_clk_source {
-	OMAP_DSS_CLK_SRC_FCK = 0,		/* OMAP2/3: DSS1_ALWON_FCLK
-						 * OMAP4: DSS_FCLK */
-	OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC,	/* OMAP3: DSI1_PLL_FCLK
-						 * OMAP4: PLL1_CLK1 */
-	OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI,	/* OMAP3: DSI2_PLL_FCLK
-						 * OMAP4: PLL1_CLK2 */
-	OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC,	/* OMAP4: PLL2_CLK1 */
-	OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI,	/* OMAP4: PLL2_CLK2 */
-};
-
-enum omap_hdmi_flags {
-	OMAP_HDMI_SDA_SCL_EXTERNAL_PULLUP = 1 << 0,
-};
-
 enum omap_dss_output_id {
 	OMAP_DSS_OUTPUT_DPI	= 1 << 0,
 	OMAP_DSS_OUTPUT_DBI	= 1 << 1,
@@ -303,36 +272,6 @@ struct omap_dss_dsi_config {
 	enum omap_dss_dsi_trans_mode trans_mode;
 };
 
-enum omapdss_version {
-	OMAPDSS_VER_UNKNOWN = 0,
-	OMAPDSS_VER_OMAP24xx,
-	OMAPDSS_VER_OMAP34xx_ES1,	/* OMAP3430 ES1.0, 2.0 */
-	OMAPDSS_VER_OMAP34xx_ES3,	/* OMAP3430 ES3.0+ */
-	OMAPDSS_VER_OMAP3630,
-	OMAPDSS_VER_AM35xx,
-	OMAPDSS_VER_OMAP4430_ES1,	/* OMAP4430 ES1.0 */
-	OMAPDSS_VER_OMAP4430_ES2,	/* OMAP4430 ES2.0, 2.1, 2.2 */
-	OMAPDSS_VER_OMAP4,		/* All other OMAP4s */
-	OMAPDSS_VER_OMAP5,
-	OMAPDSS_VER_AM43xx,
-	OMAPDSS_VER_DRA7xx,
-};
-
-/* Board specific data */
-struct omap_dss_board_info {
-	int num_devices;
-	struct omap_dss_device **devices;
-	struct omap_dss_device *default_device;
-	const char *default_display_name;
-	int (*dsi_enable_pads)(int dsi_id, unsigned lane_mask);
-	void (*dsi_disable_pads)(int dsi_id, unsigned lane_mask);
-	int (*set_min_bus_tput)(struct device *dev, unsigned long r);
-	enum omapdss_version version;
-};
-
-/* Init with the board info */
-extern int omap_display_init(struct omap_dss_board_info *board_data);
-
 struct omap_video_timings {
 	/* Unit: pixels */
 	u16 x_res;
@@ -463,7 +402,6 @@ struct omap_overlay_manager {
 	/* static fields */
 	const char *name;
 	enum omap_channel id;
-	enum omap_overlay_manager_caps caps;
 	struct list_head overlays;
 	enum omap_display_type supported_displays;
 	enum omap_dss_output_id supported_outputs;
@@ -919,4 +857,4 @@ omapdss_of_get_first_endpoint(const struct device_node *parent);
 struct omap_dss_device *
 omapdss_of_find_source_for_first_ep(struct device_node *node);
 
-#endif
+#endif /* __OMAPFB_DSS_H */
diff --git a/init/main.c b/init/main.c
index 4c17fda5c2ff..eae02aa03c9e 100644
--- a/init/main.c
+++ b/init/main.c
@@ -453,7 +453,7 @@ void __init __weak smp_setup_processor_id(void)
 }
 
 # if THREAD_SIZE >= PAGE_SIZE
-void __init __weak thread_info_cache_init(void)
+void __init __weak thread_stack_cache_init(void)
 {
 }
 #endif
@@ -627,7 +627,7 @@ asmlinkage __visible void __init start_kernel(void)
 	/* Should be run before the first non-init thread is created */
 	init_espfix_bsp();
 #endif
-	thread_info_cache_init();
+	thread_stack_cache_init();
 	cred_init();
 	fork_init();
 	proc_caches_init();
@@ -708,11 +708,13 @@ static bool __init_or_module initcall_blacklisted(initcall_t fn)
 {
 	struct blacklist_entry *entry;
 	char fn_name[KSYM_SYMBOL_LEN];
+	unsigned long addr;
 
 	if (list_empty(&blacklisted_initcalls))
 		return false;
 
-	sprint_symbol_no_offset(fn_name, (unsigned long)fn);
+	addr = (unsigned long) dereference_function_descriptor(fn);
+	sprint_symbol_no_offset(fn_name, addr);
 
 	list_for_each_entry(entry, &blacklisted_initcalls, next) {
 		if (!strcmp(fn_name, entry->buf)) {
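
On ABIs with function descriptors (ia64, ppc64 ELFv1, parisc), a C function pointer addresses a descriptor rather than the code itself, so symbol lookup on the raw pointer fails. A sketch of the lookup pattern the patch adopts; the wrapper function here is invented:

	#include <asm/sections.h>	/* dereference_function_descriptor() */

	static bool example_initcall_matches(initcall_t fn, const char *name)
	{
		char buf[KSYM_SYMBOL_LEN];
		unsigned long addr;

		/* resolve the descriptor to the actual entry point first */
		addr = (unsigned long)dereference_function_descriptor(fn);
		sprint_symbol_no_offset(buf, addr);
		return !strcmp(buf, name);
	}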
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index 04be7021f848..318858edb1cd 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -365,7 +365,6 @@ static struct file_system_type bpf_fs_type = {
 	.name		= "bpf",
 	.mount		= bpf_mount,
 	.kill_sb	= kill_litter_super,
-	.fs_flags	= FS_USERNS_MOUNT,
 };
 
 MODULE_ALIAS_FS("bpf");
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 274450efea90..9c51ec3f0f44 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3862,10 +3862,8 @@ static void _free_event(struct perf_event *event)
 	if (event->ctx)
 		put_ctx(event->ctx);
 
-	if (event->pmu) {
-		exclusive_event_destroy(event);
-		module_put(event->pmu->module);
-	}
+	exclusive_event_destroy(event);
+	module_put(event->pmu->module);
 
 	call_rcu(&event->rcu_head, free_event_rcu);
 }
diff --git a/kernel/fork.c b/kernel/fork.c
index 5c2c355aa97f..4a7ec0c6c88c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -148,18 +148,18 @@ static inline void free_task_struct(struct task_struct *tsk)
 }
 #endif
 
-void __weak arch_release_thread_info(struct thread_info *ti)
+void __weak arch_release_thread_stack(unsigned long *stack)
 {
 }
 
-#ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR
+#ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR
 
 /*
  * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
  * kmemcache based allocator.
  */
 # if THREAD_SIZE >= PAGE_SIZE
-static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
+static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
 						  int node)
 {
 	struct page *page = alloc_kmem_pages_node(node, THREADINFO_GFP,
@@ -172,33 +172,33 @@ static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
 	return page ? page_address(page) : NULL;
 }
 
-static inline void free_thread_info(struct thread_info *ti)
+static inline void free_thread_stack(unsigned long *stack)
 {
-	struct page *page = virt_to_page(ti);
+	struct page *page = virt_to_page(stack);
 
 	memcg_kmem_update_page_stat(page, MEMCG_KERNEL_STACK,
 				    -(1 << THREAD_SIZE_ORDER));
 	__free_kmem_pages(page, THREAD_SIZE_ORDER);
 }
 # else
-static struct kmem_cache *thread_info_cache;
+static struct kmem_cache *thread_stack_cache;
 
-static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
+static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
 						  int node)
 {
-	return kmem_cache_alloc_node(thread_info_cache, THREADINFO_GFP, node);
+	return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
 }
 
-static void free_thread_info(struct thread_info *ti)
+static void free_thread_stack(unsigned long *stack)
 {
-	kmem_cache_free(thread_info_cache, ti);
+	kmem_cache_free(thread_stack_cache, stack);
 }
 
-void thread_info_cache_init(void)
+void thread_stack_cache_init(void)
 {
-	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
+	thread_stack_cache = kmem_cache_create("thread_stack", THREAD_SIZE,
 					      THREAD_SIZE, 0, NULL);
-	BUG_ON(thread_info_cache == NULL);
+	BUG_ON(thread_stack_cache == NULL);
 }
 # endif
 #endif
@@ -221,9 +221,9 @@ struct kmem_cache *vm_area_cachep;
 /* SLAB cache for mm_struct structures (tsk->mm) */
 static struct kmem_cache *mm_cachep;
 
-static void account_kernel_stack(struct thread_info *ti, int account)
+static void account_kernel_stack(unsigned long *stack, int account)
 {
-	struct zone *zone = page_zone(virt_to_page(ti));
+	struct zone *zone = page_zone(virt_to_page(stack));
 
 	mod_zone_page_state(zone, NR_KERNEL_STACK, account);
 }
@@ -231,8 +231,8 @@ static void account_kernel_stack(struct thread_info *ti, int account)
 void free_task(struct task_struct *tsk)
 {
 	account_kernel_stack(tsk->stack, -1);
-	arch_release_thread_info(tsk->stack);
-	free_thread_info(tsk->stack);
+	arch_release_thread_stack(tsk->stack);
+	free_thread_stack(tsk->stack);
 	rt_mutex_debug_task_free(tsk);
 	ftrace_graph_exit_task(tsk);
 	put_seccomp_filter(tsk);
@@ -343,7 +343,7 @@ void set_task_stack_end_magic(struct task_struct *tsk)
 static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 {
 	struct task_struct *tsk;
-	struct thread_info *ti;
+	unsigned long *stack;
 	int err;
 
 	if (node == NUMA_NO_NODE)
@@ -352,15 +352,15 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 	if (!tsk)
 		return NULL;
 
-	ti = alloc_thread_info_node(tsk, node);
-	if (!ti)
+	stack = alloc_thread_stack_node(tsk, node);
+	if (!stack)
 		goto free_tsk;
 
 	err = arch_dup_task_struct(tsk, orig);
 	if (err)
-		goto free_ti;
+		goto free_stack;
 
-	tsk->stack = ti;
+	tsk->stack = stack;
 #ifdef CONFIG_SECCOMP
 	/*
 	 * We must handle setting up seccomp filters once we're under
@@ -392,14 +392,14 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 	tsk->task_frag.page = NULL;
 	tsk->wake_q.next = NULL;
 
-	account_kernel_stack(ti, 1);
+	account_kernel_stack(stack, 1);
 
 	kcov_task_init(tsk);
 
 	return tsk;
 
-free_ti:
-	free_thread_info(ti);
+free_stack:
+	free_thread_stack(stack);
 free_tsk:
 	free_task_struct(tsk);
 	return NULL;
diff --git a/kernel/futex.c b/kernel/futex.c
index ee25f5ba4aca..33664f70e2d2 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -469,7 +469,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
 {
 	unsigned long address = (unsigned long)uaddr;
 	struct mm_struct *mm = current->mm;
-	struct page *page;
+	struct page *page, *tail;
 	struct address_space *mapping;
 	int err, ro = 0;
 
@@ -530,7 +530,15 @@ again:
 	 * considered here and page lock forces unnecessarily serialization
 	 * From this point on, mapping will be re-verified if necessary and
 	 * page lock will be acquired only if it is unavoidable
-	 */
+	 *
+	 * Mapping checks require the head page for any compound page so the
+	 * head page and mapping is looked up now. For anonymous pages, it
+	 * does not matter if the page splits in the future as the key is
+	 * based on the address. For filesystem-backed pages, the tail is
+	 * required as the index of the page determines the key. For
+	 * base pages, there is no tail page and tail == page.
+	 */
+	tail = page;
 	page = compound_head(page);
 	mapping = READ_ONCE(page->mapping);
 
@@ -654,7 +662,7 @@ again:
 
 		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
 		key->shared.inode = inode;
-		key->shared.pgoff = basepage_index(page);
+		key->shared.pgoff = basepage_index(tail);
 		rcu_read_unlock();
 	}
 
diff --git a/kernel/irq/ipi.c b/kernel/irq/ipi.c
index c42742208e5e..89b49f6773f0 100644
--- a/kernel/irq/ipi.c
+++ b/kernel/irq/ipi.c
@@ -125,7 +125,7 @@ int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
 
 	domain = data->domain;
 	if (WARN_ON(domain == NULL))
-		return;
+		return -EINVAL;
 
 	if (!irq_domain_is_ipi(domain)) {
 		pr_warn("Trying to destroy a non IPI domain!\n");
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 05254eeb4b4e..4b353e0be121 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -58,13 +58,36 @@ static void jump_label_update(struct static_key *key);
 
 void static_key_slow_inc(struct static_key *key)
 {
+	int v, v1;
+
 	STATIC_KEY_CHECK_USE();
-	if (atomic_inc_not_zero(&key->enabled))
-		return;
+
+	/*
+	 * Careful if we get concurrent static_key_slow_inc() calls;
+	 * later calls must wait for the first one to _finish_ the
+	 * jump_label_update() process. At the same time, however,
+	 * the jump_label_update() call below wants to see
+	 * static_key_enabled(&key) for jumps to be updated properly.
+	 *
+	 * So give a special meaning to negative key->enabled: it sends
+	 * static_key_slow_inc() down the slow path, and it is non-zero
+	 * so it counts as "enabled" in jump_label_update(). Note that
+	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
+	 */
+	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
+		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
+		if (likely(v1 == v))
+			return;
+	}
 
 	jump_label_lock();
-	if (atomic_inc_return(&key->enabled) == 1)
+	if (atomic_read(&key->enabled) == 0) {
+		atomic_set(&key->enabled, -1);
 		jump_label_update(key);
+		atomic_set(&key->enabled, 1);
+	} else {
+		atomic_inc(&key->enabled);
+	}
 	jump_label_unlock();
 }
 EXPORT_SYMBOL_GPL(static_key_slow_inc);
@@ -72,6 +95,13 @@ EXPORT_SYMBOL_GPL(static_key_slow_inc);
 static void __static_key_slow_dec(struct static_key *key,
 		unsigned long rate_limit, struct delayed_work *work)
 {
+	/*
+	 * The negative count check is valid even when a negative
+	 * key->enabled is in use by static_key_slow_inc(); a
+	 * __static_key_slow_dec() before the first static_key_slow_inc()
+	 * returns is unbalanced, because all other static_key_slow_inc()
+	 * instances block while the update is in progress.
+	 */
 	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
 		WARN(atomic_read(&key->enabled) < 0,
 		     "jump label: negative count!\n");
diff --git a/kernel/kcov.c b/kernel/kcov.c
index a02f2dddd1d7..8d44b3fea9d0 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -264,7 +264,12 @@ static const struct file_operations kcov_fops = {
 
 static int __init kcov_init(void)
 {
-	if (!debugfs_create_file("kcov", 0600, NULL, NULL, &kcov_fops)) {
+	/*
+	 * The kcov debugfs file won't ever get removed and thus,
+	 * there is no need to protect it against removal races. The
+	 * use of debugfs_create_file_unsafe() is actually safe here.
+	 */
+	if (!debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops)) {
 		pr_err("failed to create kcov in debugfs\n");
 		return -ENOMEM;
 	}
diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
index 3ef3736002d8..9c951fade415 100644
--- a/kernel/locking/mutex-debug.c
+++ b/kernel/locking/mutex-debug.c
@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
 }
 
 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
-			    struct thread_info *ti)
+			    struct task_struct *task)
 {
 	SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
 
 	/* Mark the current thread as blocked on the lock: */
-	ti->task->blocked_on = waiter;
+	task->blocked_on = waiter;
 }
 
 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
-			 struct thread_info *ti)
+			 struct task_struct *task)
 {
 	DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
-	DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
-	DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
-	ti->task->blocked_on = NULL;
+	DEBUG_LOCKS_WARN_ON(waiter->task != task);
+	DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
+	task->blocked_on = NULL;
 
 	list_del_init(&waiter->list);
 	waiter->task = NULL;
diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
index 0799fd3e4cfa..d06ae3bb46c5 100644
--- a/kernel/locking/mutex-debug.h
+++ b/kernel/locking/mutex-debug.h
@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
 extern void debug_mutex_add_waiter(struct mutex *lock,
 				   struct mutex_waiter *waiter,
-				   struct thread_info *ti);
+				   struct task_struct *task);
 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
-				struct thread_info *ti);
+				struct task_struct *task);
 extern void debug_mutex_unlock(struct mutex *lock);
 extern void debug_mutex_init(struct mutex *lock, const char *name,
 			     struct lock_class_key *key);
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index e364b424b019..a70b90db3909 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -486,9 +486,6 @@ __ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
 	if (!hold_ctx)
 		return 0;
 
-	if (unlikely(ctx == hold_ctx))
-		return -EALREADY;
-
 	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
 	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
 #ifdef CONFIG_DEBUG_MUTEXES
@@ -514,6 +511,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	unsigned long flags;
 	int ret;
 
+	if (use_ww_ctx) {
+		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
+		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
+			return -EALREADY;
+	}
+
 	preempt_disable();
 	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
 
@@ -534,7 +537,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		goto skip_wait;
 
 	debug_mutex_lock_common(lock, &waiter);
-	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
+	debug_mutex_add_waiter(lock, &waiter, task);
 
 	/* add waiting tasks to the end of the waitqueue (FIFO): */
 	list_add_tail(&waiter.list, &lock->wait_list);
@@ -581,7 +584,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	}
 	__set_task_state(task, TASK_RUNNING);
 
-	mutex_remove_waiter(lock, &waiter, current_thread_info());
+	mutex_remove_waiter(lock, &waiter, task);
 	/* set it to 0 if there are no waiters left: */
 	if (likely(list_empty(&lock->wait_list)))
 		atomic_set(&lock->count, 0);
@@ -602,7 +605,7 @@ skip_wait:
 	return 0;
 
 err:
-	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
+	mutex_remove_waiter(lock, &waiter, task);
 	spin_unlock_mutex(&lock->wait_lock, flags);
 	debug_mutex_free_waiter(&waiter);
 	mutex_release(&lock->dep_map, 1, ip);
diff --git a/kernel/locking/mutex.h b/kernel/locking/mutex.h
index 5cda397607f2..a68bae5e852a 100644
--- a/kernel/locking/mutex.h
+++ b/kernel/locking/mutex.h
@@ -13,7 +13,7 @@
 	do { spin_lock(lock); (void)(flags); } while (0)
 #define spin_unlock_mutex(lock, flags) \
 	do { spin_unlock(lock); (void)(flags); } while (0)
-#define mutex_remove_waiter(lock, waiter, ti) \
+#define mutex_remove_waiter(lock, waiter, task) \
 	__list_del((waiter)->list.prev, (waiter)->list.next)
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index ce2f75e32ae1..5fc8c311b8fe 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -267,6 +267,66 @@ static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock,
 #define queued_spin_lock_slowpath	native_queued_spin_lock_slowpath
 #endif
 
+/*
+ * queued_spin_lock_slowpath() can (load-)ACQUIRE the lock before
+ * issuing an _unordered_ store to set _Q_LOCKED_VAL.
+ *
+ * This means that the store can be delayed, but no later than the
+ * store-release from the unlock. This means that simply observing
+ * _Q_LOCKED_VAL is not sufficient to determine if the lock is acquired.
+ *
+ * There are two paths that can issue the unordered store:
+ *
+ *  (1) clear_pending_set_locked():	*,1,0 -> *,0,1
+ *
+ *  (2) set_locked():			t,0,0 -> t,0,1 ; t != 0
+ *      atomic_cmpxchg_relaxed():	t,0,0 -> 0,0,1
+ *
+ * However, in both cases we have other !0 state we've set before to queue
+ * ourselves:
+ *
+ * For (1) we have the atomic_cmpxchg_acquire() that set _Q_PENDING_VAL, our
+ * load is constrained by that ACQUIRE to not pass before that, and thus must
+ * observe the store.
+ *
+ * For (2) we have a more interesting scenario. We enqueue ourselves using
+ * xchg_tail(), which ends up being a RELEASE. This in itself is not
+ * sufficient, however that is followed by an smp_cond_acquire() on the same
+ * word, giving a RELEASE->ACQUIRE ordering. This again constrains our load and
+ * guarantees we must observe that store.
+ *
+ * Therefore both cases have other !0 state that is observable before the
+ * unordered locked byte store comes through. This means we can use that to
+ * wait for the lock store, and then wait for an unlock.
+ */
+#ifndef queued_spin_unlock_wait
+void queued_spin_unlock_wait(struct qspinlock *lock)
+{
+	u32 val;
+
+	for (;;) {
+		val = atomic_read(&lock->val);
+
+		if (!val) /* not locked, we're done */
+			goto done;
+
+		if (val & _Q_LOCKED_MASK) /* locked, go wait for unlock */
+			break;
+
+		/* not locked, but pending, wait until we observe the lock */
+		cpu_relax();
+	}
+
+	/* any unlock is good */
+	while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
+		cpu_relax();
+
+done:
+	smp_rmb(); /* CTRL + RMB -> ACQUIRE */
+}
+EXPORT_SYMBOL(queued_spin_unlock_wait);
+#endif
+
 #endif /* _GEN_PV_LOCK_SLOWPATH */
 
 /**
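
queued_spin_unlock_wait() backs spin_unlock_wait(): wait for any current holder to release, without ever taking the lock. A hedged teardown-style usage sketch; the flag, lock, and function names are invented:

	static DEFINE_SPINLOCK(example_lock);
	static bool example_going_away;

	static void example_teardown(void)
	{
		example_going_away = true;
		smp_mb();			/* order the flag against lock holders */
		spin_unlock_wait(&example_lock);
		/* no CPU can still be inside a critical section begun before the flag */
	}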
diff --git a/kernel/power/process.c b/kernel/power/process.c
index df058bed53ce..0c2ee9761d57 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -146,6 +146,18 @@ int freeze_processes(void)
 	if (!error && !oom_killer_disable())
 		error = -EBUSY;
 
+	/*
+	 * There is a hard to fix race between oom_reaper kernel thread
+	 * and oom_killer_disable. oom_reaper calls exit_oom_victim
+	 * before the victim reaches exit_mm so try to freeze all the tasks
+	 * again and catch such a left over task.
+	 */
+	if (!error) {
+		pr_info("Double checking all user space processes after OOM killer disable... ");
+		error = try_to_freeze_tasks(true);
+		pr_cont("\n");
+	}
+
 	if (error)
 		thaw_processes();
 	return error;
diff --git a/kernel/relay.c b/kernel/relay.c
index 074994bcfa9b..04d7cf3ef8cf 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -614,6 +614,7 @@ free_bufs:
 
 	kref_put(&chan->kref, relay_destroy_channel);
 	mutex_unlock(&relay_channels_mutex);
+	kfree(chan);
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(relay_open);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7f2cae4620c7..51d7105f529a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1536,7 +1536,9 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 	for (;;) {
 		/* Any allowed, online CPU? */
 		for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
-			if (!cpu_active(dest_cpu))
+			if (!(p->flags & PF_KTHREAD) && !cpu_active(dest_cpu))
+				continue;
+			if (!cpu_online(dest_cpu))
 				continue;
 			goto out;
 		}
@@ -2253,9 +2255,11 @@ int sysctl_numa_balancing(struct ctl_table *table, int write,
 #endif
 #endif
 
+#ifdef CONFIG_SCHEDSTATS
+
 DEFINE_STATIC_KEY_FALSE(sched_schedstats);
+static bool __initdata __sched_schedstats = false;
 
-#ifdef CONFIG_SCHEDSTATS
 static void set_schedstats(bool enabled)
 {
 	if (enabled)
@@ -2278,11 +2282,16 @@ static int __init setup_schedstats(char *str)
 	if (!str)
 		goto out;
 
+	/*
+	 * This code is called before jump labels have been set up, so we can't
+	 * change the static branch directly just yet. Instead set a temporary
+	 * variable so init_schedstats() can do it later.
+	 */
 	if (!strcmp(str, "enable")) {
-		set_schedstats(true);
+		__sched_schedstats = true;
 		ret = 1;
 	} else if (!strcmp(str, "disable")) {
-		set_schedstats(false);
+		__sched_schedstats = false;
 		ret = 1;
 	}
 out:
@@ -2293,6 +2302,11 @@ out:
 }
 __setup("schedstats=", setup_schedstats);
 
+static void __init init_schedstats(void)
+{
+	set_schedstats(__sched_schedstats);
+}
+
 #ifdef CONFIG_PROC_SYSCTL
 int sysctl_schedstats(struct ctl_table *table, int write,
 			 void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -2313,8 +2327,10 @@ int sysctl_schedstats(struct ctl_table *table, int write,
 	set_schedstats(state);
 	return err;
 }
-#endif
-#endif
+#endif /* CONFIG_PROC_SYSCTL */
+#else  /* !CONFIG_SCHEDSTATS */
+static inline void init_schedstats(void) {}
+#endif /* CONFIG_SCHEDSTATS */
 
 /*
  * fork()/clone()-time setup:
@@ -2521,10 +2537,9 @@ void wake_up_new_task(struct task_struct *p)
2521 */ 2537 */
2522 set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0)); 2538 set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
2523#endif 2539#endif
2524 /* Post initialize new task's util average when its cfs_rq is set */ 2540 rq = __task_rq_lock(p, &rf);
2525 post_init_entity_util_avg(&p->se); 2541 post_init_entity_util_avg(&p->se);
2526 2542
2527 rq = __task_rq_lock(p, &rf);
2528 activate_task(rq, p, 0); 2543 activate_task(rq, p, 0);
2529 p->on_rq = TASK_ON_RQ_QUEUED; 2544 p->on_rq = TASK_ON_RQ_QUEUED;
2530 trace_sched_wakeup_new(p); 2545 trace_sched_wakeup_new(p);
@@ -3156,7 +3171,8 @@ static noinline void __schedule_bug(struct task_struct *prev)
3156static inline void schedule_debug(struct task_struct *prev) 3171static inline void schedule_debug(struct task_struct *prev)
3157{ 3172{
3158#ifdef CONFIG_SCHED_STACK_END_CHECK 3173#ifdef CONFIG_SCHED_STACK_END_CHECK
3159 BUG_ON(task_stack_end_corrupted(prev)); 3174 if (task_stack_end_corrupted(prev))
3175 panic("corrupted stack end detected inside scheduler\n");
3160#endif 3176#endif
3161 3177
3162 if (unlikely(in_atomic_preempt_off())) { 3178 if (unlikely(in_atomic_preempt_off())) {
@@ -5133,14 +5149,16 @@ void show_state_filter(unsigned long state_filter)
5133 /* 5149 /*
5134 * reset the NMI-timeout, listing all files on a slow 5150 * reset the NMI-timeout, listing all files on a slow
5135 * console might take a lot of time: 5151 * console might take a lot of time:
5152 * Also, reset softlockup watchdogs on all CPUs, because
5153 * another CPU might be blocked waiting for us to process
5154 * an IPI.
5136 */ 5155 */
5137 touch_nmi_watchdog(); 5156 touch_nmi_watchdog();
5157 touch_all_softlockup_watchdogs();
5138 if (!state_filter || (p->state & state_filter)) 5158 if (!state_filter || (p->state & state_filter))
5139 sched_show_task(p); 5159 sched_show_task(p);
5140 } 5160 }
5141 5161
5142 touch_all_softlockup_watchdogs();
5143
5144#ifdef CONFIG_SCHED_DEBUG 5162#ifdef CONFIG_SCHED_DEBUG
5145 if (!state_filter) 5163 if (!state_filter)
5146 sysrq_sched_debug_show(); 5164 sysrq_sched_debug_show();
@@ -7487,6 +7505,8 @@ void __init sched_init(void)
7487#endif 7505#endif
7488 init_sched_fair_class(); 7506 init_sched_fair_class();
7489 7507
7508 init_schedstats();
7509
7490 scheduler_running = 1; 7510 scheduler_running = 1;
7491} 7511}
7492 7512
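
Context for the schedstats hunks above: __setup() handlers run before the jump-label machinery is initialized, so setup_schedstats() can only record the request in an __initdata flag, and init_schedstats() applies it from sched_init() once static branches may safely be flipped. A minimal userspace sketch of this two-phase pattern (all names here are illustrative stand-ins, not the patch's code):

#include <stdbool.h>
#include <stdio.h>

static bool sched_schedstats;   /* stands in for the static branch */
static bool early_request;      /* stands in for __sched_schedstats */

static void parse_boot_param(const char *str)
{
	/* Too early to patch code: only record what was asked for. */
	early_request = (str[0] == 'e');   /* "enable" vs "disable" */
}

static void init_schedstats_like(void)
{
	/* Jump labels are ready now; apply the recorded request. */
	sched_schedstats = early_request;
}

int main(void)
{
	parse_boot_param("enable");
	init_schedstats_like();
	printf("schedstats: %d\n", sched_schedstats);
	return 0;
}
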
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index cf905f655ba1..0368c393a336 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -427,19 +427,12 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
427 SPLIT_NS(p->se.vruntime), 427 SPLIT_NS(p->se.vruntime),
428 (long long)(p->nvcsw + p->nivcsw), 428 (long long)(p->nvcsw + p->nivcsw),
429 p->prio); 429 p->prio);
430#ifdef CONFIG_SCHEDSTATS 430
431 if (schedstat_enabled()) {
432 SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
433 SPLIT_NS(p->se.statistics.wait_sum),
434 SPLIT_NS(p->se.sum_exec_runtime),
435 SPLIT_NS(p->se.statistics.sum_sleep_runtime));
436 }
437#else
438 SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld", 431 SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
439 0LL, 0L, 432 SPLIT_NS(schedstat_val(p, se.statistics.wait_sum)),
440 SPLIT_NS(p->se.sum_exec_runtime), 433 SPLIT_NS(p->se.sum_exec_runtime),
441 0LL, 0L); 434 SPLIT_NS(schedstat_val(p, se.statistics.sum_sleep_runtime)));
442#endif 435
443#ifdef CONFIG_NUMA_BALANCING 436#ifdef CONFIG_NUMA_BALANCING
444 SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p)); 437 SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
445#endif 438#endif
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 218f8e83db73..bdcbeea90c95 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2904,6 +2904,23 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
2904 } 2904 }
2905} 2905}
2906 2906
2907/*
2908 * Unsigned subtract and clamp on underflow.
2909 *
2910 * Explicitly do a load-store to ensure the intermediate value never hits
2911 * memory. This allows lockless observations without ever seeing the negative
2912 * values.
2913 */
2914#define sub_positive(_ptr, _val) do { \
2915 typeof(_ptr) ptr = (_ptr); \
2916 typeof(*ptr) val = (_val); \
2917 typeof(*ptr) res, var = READ_ONCE(*ptr); \
2918 res = var - val; \
2919 if (res > var) \
2920 res = 0; \
2921 WRITE_ONCE(*ptr, res); \
2922} while (0)
2923
2907/* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */ 2924/* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */
2908static inline int 2925static inline int
2909update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq) 2926update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
@@ -2913,15 +2930,15 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
2913 2930
2914 if (atomic_long_read(&cfs_rq->removed_load_avg)) { 2931 if (atomic_long_read(&cfs_rq->removed_load_avg)) {
2915 s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0); 2932 s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
2916 sa->load_avg = max_t(long, sa->load_avg - r, 0); 2933 sub_positive(&sa->load_avg, r);
2917 sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0); 2934 sub_positive(&sa->load_sum, r * LOAD_AVG_MAX);
2918 removed_load = 1; 2935 removed_load = 1;
2919 } 2936 }
2920 2937
2921 if (atomic_long_read(&cfs_rq->removed_util_avg)) { 2938 if (atomic_long_read(&cfs_rq->removed_util_avg)) {
2922 long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0); 2939 long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);
2923 sa->util_avg = max_t(long, sa->util_avg - r, 0); 2940 sub_positive(&sa->util_avg, r);
2924 sa->util_sum = max_t(s32, sa->util_sum - r * LOAD_AVG_MAX, 0); 2941 sub_positive(&sa->util_sum, r * LOAD_AVG_MAX);
2925 removed_util = 1; 2942 removed_util = 1;
2926 } 2943 }
2927 2944
@@ -2994,10 +3011,10 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
2994 &se->avg, se->on_rq * scale_load_down(se->load.weight), 3011 &se->avg, se->on_rq * scale_load_down(se->load.weight),
2995 cfs_rq->curr == se, NULL); 3012 cfs_rq->curr == se, NULL);
2996 3013
2997 cfs_rq->avg.load_avg = max_t(long, cfs_rq->avg.load_avg - se->avg.load_avg, 0); 3014 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
2998 cfs_rq->avg.load_sum = max_t(s64, cfs_rq->avg.load_sum - se->avg.load_sum, 0); 3015 sub_positive(&cfs_rq->avg.load_sum, se->avg.load_sum);
2999 cfs_rq->avg.util_avg = max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0); 3016 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
3000 cfs_rq->avg.util_sum = max_t(s32, cfs_rq->avg.util_sum - se->avg.util_sum, 0); 3017 sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
3001 3018
3002 cfs_rq_util_change(cfs_rq); 3019 cfs_rq_util_change(cfs_rq);
3003} 3020}
@@ -3246,7 +3263,7 @@ static inline void check_schedstat_required(void)
3246 trace_sched_stat_iowait_enabled() || 3263 trace_sched_stat_iowait_enabled() ||
3247 trace_sched_stat_blocked_enabled() || 3264 trace_sched_stat_blocked_enabled() ||
3248 trace_sched_stat_runtime_enabled()) { 3265 trace_sched_stat_runtime_enabled()) {
3249 pr_warn_once("Scheduler tracepoints stat_sleep, stat_iowait, " 3266 printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, "
3250 "stat_blocked and stat_runtime require the " 3267 "stat_blocked and stat_runtime require the "
3251 "kernel parameter schedstats=enabled or " 3268 "kernel parameter schedstats=enabled or "
3252 "kernel.sched_schedstats=1\n"); 3269 "kernel.sched_schedstats=1\n");
@@ -4185,6 +4202,26 @@ static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
4185 if (!cfs_bandwidth_used()) 4202 if (!cfs_bandwidth_used())
4186 return; 4203 return;
4187 4204
4205 /* Synchronize hierarchical throttle counter: */
4206 if (unlikely(!cfs_rq->throttle_uptodate)) {
4207 struct rq *rq = rq_of(cfs_rq);
4208 struct cfs_rq *pcfs_rq;
4209 struct task_group *tg;
4210
4211 cfs_rq->throttle_uptodate = 1;
4212
4213 /* Get closest up-to-date node, because leaves go first: */
4214 for (tg = cfs_rq->tg->parent; tg; tg = tg->parent) {
4215 pcfs_rq = tg->cfs_rq[cpu_of(rq)];
4216 if (pcfs_rq->throttle_uptodate)
4217 break;
4218 }
4219 if (tg) {
4220 cfs_rq->throttle_count = pcfs_rq->throttle_count;
4221 cfs_rq->throttled_clock_task = rq_clock_task(rq);
4222 }
4223 }
4224
4188 /* an active group must be handled by the update_curr()->put() path */ 4225 /* an active group must be handled by the update_curr()->put() path */
4189 if (!cfs_rq->runtime_enabled || cfs_rq->curr) 4226 if (!cfs_rq->runtime_enabled || cfs_rq->curr)
4190 return; 4227 return;
@@ -4500,15 +4537,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
4500 4537
4501 /* Don't dequeue parent if it has other entities besides us */ 4538 /* Don't dequeue parent if it has other entities besides us */
4502 if (cfs_rq->load.weight) { 4539 if (cfs_rq->load.weight) {
4540 /* Avoid re-evaluating load for this entity: */
4541 se = parent_entity(se);
4503 /* 4542 /*
4504 * Bias pick_next to pick a task from this cfs_rq, as 4543 * Bias pick_next to pick a task from this cfs_rq, as
4505 * p is sleeping when it is within its sched_slice. 4544 * p is sleeping when it is within its sched_slice.
4506 */ 4545 */
4507 if (task_sleep && parent_entity(se)) 4546 if (task_sleep && se && !throttled_hierarchy(cfs_rq))
4508 set_next_buddy(parent_entity(se)); 4547 set_next_buddy(se);
4509
4510 /* avoid re-evaluating load for this entity */
4511 se = parent_entity(se);
4512 break; 4548 break;
4513 } 4549 }
4514 flags |= DEQUEUE_SLEEP; 4550 flags |= DEQUEUE_SLEEP;
@@ -8496,8 +8532,9 @@ void free_fair_sched_group(struct task_group *tg)
8496 8532
8497int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) 8533int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
8498{ 8534{
8499 struct cfs_rq *cfs_rq;
8500 struct sched_entity *se; 8535 struct sched_entity *se;
8536 struct cfs_rq *cfs_rq;
8537 struct rq *rq;
8501 int i; 8538 int i;
8502 8539
8503 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL); 8540 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
@@ -8512,6 +8549,8 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
8512 init_cfs_bandwidth(tg_cfs_bandwidth(tg)); 8549 init_cfs_bandwidth(tg_cfs_bandwidth(tg));
8513 8550
8514 for_each_possible_cpu(i) { 8551 for_each_possible_cpu(i) {
8552 rq = cpu_rq(i);
8553
8515 cfs_rq = kzalloc_node(sizeof(struct cfs_rq), 8554 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
8516 GFP_KERNEL, cpu_to_node(i)); 8555 GFP_KERNEL, cpu_to_node(i));
8517 if (!cfs_rq) 8556 if (!cfs_rq)
@@ -8525,7 +8564,10 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
8525 init_cfs_rq(cfs_rq); 8564 init_cfs_rq(cfs_rq);
8526 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); 8565 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
8527 init_entity_runnable_average(se); 8566 init_entity_runnable_average(se);
8567
8568 raw_spin_lock_irq(&rq->lock);
8528 post_init_entity_util_avg(se); 8569 post_init_entity_util_avg(se);
8570 raw_spin_unlock_irq(&rq->lock);
8529 } 8571 }
8530 8572
8531 return 1; 8573 return 1;
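
sub_positive() replaces the max_t() clamps so that the subtraction and the underflow check happen in a local, and only the final clamped value is stored: a lockless reader doing READ_ONCE() on the average can then never observe a transiently wrapped (huge) intermediate. A standalone model of the macro, with READ_ONCE/WRITE_ONCE approximated by volatile accesses for illustration:

#include <stdio.h>

#define READ_ONCE(x)      (*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v)  (*(volatile __typeof__(x) *)&(x) = (v))

#define sub_positive(_ptr, _val) do {                           \
	__typeof__(_ptr) ptr = (_ptr);                          \
	__typeof__(*ptr) val = (_val);                          \
	__typeof__(*ptr) res, var = READ_ONCE(*ptr);            \
	res = var - val;                                        \
	if (res > var)   /* unsigned wrap => underflow */       \
		res = 0;                                        \
	WRITE_ONCE(*ptr, res);                                  \
} while (0)

int main(void)
{
	unsigned long avg = 100;

	sub_positive(&avg, 30UL);    /* 70 */
	sub_positive(&avg, 500UL);   /* would wrap; clamped to 0 */
	printf("%lu\n", avg);        /* prints 0 */
	return 0;
}
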
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index bd12c6c714ec..c5aeedf4e93a 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -127,7 +127,7 @@ static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
127 */ 127 */
128static void cpuidle_idle_call(void) 128static void cpuidle_idle_call(void)
129{ 129{
130 struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); 130 struct cpuidle_device *dev = cpuidle_get_device();
131 struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); 131 struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
132 int next_state, entered_state; 132 int next_state, entered_state;
133 133
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 72f1f3087b04..7cbeb92a1cb9 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -437,7 +437,7 @@ struct cfs_rq {
437 437
438 u64 throttled_clock, throttled_clock_task; 438 u64 throttled_clock, throttled_clock_task;
439 u64 throttled_clock_task_time; 439 u64 throttled_clock_task_time;
440 int throttled, throttle_count; 440 int throttled, throttle_count, throttle_uptodate;
441 struct list_head throttled_list; 441 struct list_head throttled_list;
442#endif /* CONFIG_CFS_BANDWIDTH */ 442#endif /* CONFIG_CFS_BANDWIDTH */
443#endif /* CONFIG_FAIR_GROUP_SCHED */ 443#endif /* CONFIG_FAIR_GROUP_SCHED */
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index 70b3b6a20fb0..78955cbea31c 100644
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -33,6 +33,8 @@ rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
33# define schedstat_inc(rq, field) do { if (schedstat_enabled()) { (rq)->field++; } } while (0) 33# define schedstat_inc(rq, field) do { if (schedstat_enabled()) { (rq)->field++; } } while (0)
34# define schedstat_add(rq, field, amt) do { if (schedstat_enabled()) { (rq)->field += (amt); } } while (0) 34# define schedstat_add(rq, field, amt) do { if (schedstat_enabled()) { (rq)->field += (amt); } } while (0)
35# define schedstat_set(var, val) do { if (schedstat_enabled()) { var = (val); } } while (0) 35# define schedstat_set(var, val) do { if (schedstat_enabled()) { var = (val); } } while (0)
36# define schedstat_val(rq, field) ((schedstat_enabled()) ? (rq)->field : 0)
37
36#else /* !CONFIG_SCHEDSTATS */ 38#else /* !CONFIG_SCHEDSTATS */
37static inline void 39static inline void
38rq_sched_info_arrive(struct rq *rq, unsigned long long delta) 40rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
@@ -47,6 +49,7 @@ rq_sched_info_depart(struct rq *rq, unsigned long long delta)
47# define schedstat_inc(rq, field) do { } while (0) 49# define schedstat_inc(rq, field) do { } while (0)
48# define schedstat_add(rq, field, amt) do { } while (0) 50# define schedstat_add(rq, field, amt) do { } while (0)
49# define schedstat_set(var, val) do { } while (0) 51# define schedstat_set(var, val) do { } while (0)
52# define schedstat_val(rq, field) 0
50#endif 53#endif
51 54
52#ifdef CONFIG_SCHED_INFO 55#ifdef CONFIG_SCHED_INFO
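
The new schedstat_val() macro is what lets print_task() in debug.c above drop its #ifdef block: the accessor itself collapses to the stored value, to 0 when stats are runtime-disabled, or to a constant 0 when CONFIG_SCHEDSTATS is compiled out. A compilable sketch of the dual definition (simplified, with schedstat_enabled() stubbed):

#include <stdio.h>

#define CONFIG_SCHEDSTATS 1   /* flip to 0 to compile the accessor out */

#if CONFIG_SCHEDSTATS
static int schedstat_enabled(void) { return 1; }   /* stub */
# define schedstat_val(p, field) (schedstat_enabled() ? (p)->field : 0)
#else
# define schedstat_val(p, field) 0
#endif

struct se_stats { long long wait_sum; };

int main(void)
{
	struct se_stats s = { .wait_sum = 1234 };

	/* One call site works for all three configurations. */
	printf("%lld\n", (long long)schedstat_val(&s, wait_sum));
	return 0;
}
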
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 8c7392c4fdbd..e99df0ff1d42 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -425,6 +425,7 @@ void destroy_hrtimer_on_stack(struct hrtimer *timer)
425{ 425{
426 debug_object_free(timer, &hrtimer_debug_descr); 426 debug_object_free(timer, &hrtimer_debug_descr);
427} 427}
428EXPORT_SYMBOL_GPL(destroy_hrtimer_on_stack);
428 429
429#else 430#else
430static inline void debug_hrtimer_init(struct hrtimer *timer) { } 431static inline void debug_hrtimer_init(struct hrtimer *timer) { }
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 780bcbe1d4de..720b7bb01d43 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -198,7 +198,7 @@ static u64 bpf_perf_event_read(u64 r1, u64 index, u64 r3, u64 r4, u64 r5)
198 if (unlikely(index >= array->map.max_entries)) 198 if (unlikely(index >= array->map.max_entries))
199 return -E2BIG; 199 return -E2BIG;
200 200
201 file = (struct file *)array->ptrs[index]; 201 file = READ_ONCE(array->ptrs[index]);
202 if (unlikely(!file)) 202 if (unlikely(!file))
203 return -ENOENT; 203 return -ENOENT;
204 204
@@ -247,7 +247,7 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
247 if (unlikely(index >= array->map.max_entries)) 247 if (unlikely(index >= array->map.max_entries))
248 return -E2BIG; 248 return -E2BIG;
249 249
250 file = (struct file *)array->ptrs[index]; 250 file = READ_ONCE(array->ptrs[index]);
251 if (unlikely(!file)) 251 if (unlikely(!file))
252 return -ENOENT; 252 return -ENOENT;
253 253
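
In the bpf_trace.c hunks, READ_ONCE() makes the slot load a single, non-torn access, so the value that is NULL-checked is the same value that gets used even if another CPU concurrently rewrites array->ptrs[index]; with a plain load the compiler may re-fetch the pointer between the check and the use. A userspace miniature of the idiom (READ_ONCE modeled with a volatile access; names invented):

#include <stdio.h>

#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

static int *slots[4];   /* imagine another thread stores/clears entries */

static int lookup(unsigned idx)
{
	int *p = READ_ONCE(slots[idx]);   /* single stable load */

	if (!p)
		return -1;   /* we test and dereference the same value; */
	return *p;           /* no re-fetch can turn it into NULL here  */
}

int main(void)
{
	int v = 7;

	slots[2] = &v;
	printf("%d %d\n", lookup(2), lookup(3));   /* 7 -1 */
	return 0;
}
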
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index f96f0383f6c6..ad1d6164e946 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -36,6 +36,10 @@ struct trace_bprintk_fmt {
36static inline struct trace_bprintk_fmt *lookup_format(const char *fmt) 36static inline struct trace_bprintk_fmt *lookup_format(const char *fmt)
37{ 37{
38 struct trace_bprintk_fmt *pos; 38 struct trace_bprintk_fmt *pos;
39
40 if (!fmt)
41 return ERR_PTR(-EINVAL);
42
39 list_for_each_entry(pos, &trace_bprintk_fmt_list, list) { 43 list_for_each_entry(pos, &trace_bprintk_fmt_list, list) {
40 if (!strcmp(pos->fmt, fmt)) 44 if (!strcmp(pos->fmt, fmt))
41 return pos; 45 return pos;
@@ -57,7 +61,8 @@ void hold_module_trace_bprintk_format(const char **start, const char **end)
57 for (iter = start; iter < end; iter++) { 61 for (iter = start; iter < end; iter++) {
58 struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter); 62 struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter);
59 if (tb_fmt) { 63 if (tb_fmt) {
60 *iter = tb_fmt->fmt; 64 if (!IS_ERR(tb_fmt))
65 *iter = tb_fmt->fmt;
61 continue; 66 continue;
62 } 67 }
63 68
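
The trace_printk fix distinguishes three lookup outcomes - found, not found (NULL), and invalid input - by returning an ERR_PTR() cookie for the last, which the caller filters with IS_ERR(). The kernel encodes small negative errnos at the top of the pointer space; a simplified userspace model of the convention:

#include <stdio.h>
#include <stdint.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* Errnos land in the top MAX_ERRNO addresses. */
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

static void *lookup(const char *key)
{
	if (!key)
		return ERR_PTR(-22);   /* -EINVAL: invalid input */
	return NULL;                   /* valid input, just not found */
}

int main(void)
{
	void *r = lookup(NULL);

	if (IS_ERR(r))
		printf("error %ld\n", PTR_ERR(r));   /* error -22 */
	return 0;
}
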
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 77d7d034bac3..b9cfdbfae9aa 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1841,6 +1841,9 @@ config TEST_BITMAP
1841 1841
1842 If unsure, say N. 1842 If unsure, say N.
1843 1843
1844config TEST_UUID
1845 tristate "Test functions located in the uuid module at runtime"
1846
1844config TEST_RHASHTABLE 1847config TEST_RHASHTABLE
1845 tristate "Perform selftest on resizable hash table" 1848 tristate "Perform selftest on resizable hash table"
1846 default n 1849 default n
diff --git a/lib/Makefile b/lib/Makefile
index 499fb354d627..ff6a7a6c6395 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -58,6 +58,7 @@ obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o
58obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o 58obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
59obj-$(CONFIG_TEST_PRINTF) += test_printf.o 59obj-$(CONFIG_TEST_PRINTF) += test_printf.o
60obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o 60obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
61obj-$(CONFIG_TEST_UUID) += test_uuid.o
61 62
62ifeq ($(CONFIG_DEBUG_KOBJECT),y) 63ifeq ($(CONFIG_DEBUG_KOBJECT),y)
63CFLAGS_kobject.o += -DDEBUG 64CFLAGS_kobject.o += -DDEBUG
diff --git a/lib/test_uuid.c b/lib/test_uuid.c
new file mode 100644
index 000000000000..547d3127a3cf
--- /dev/null
+++ b/lib/test_uuid.c
@@ -0,0 +1,133 @@
1/*
2 * Test cases for lib/uuid.c module.
3 */
4#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
5
6#include <linux/init.h>
7#include <linux/kernel.h>
8#include <linux/module.h>
9#include <linux/string.h>
10#include <linux/uuid.h>
11
12struct test_uuid_data {
13 const char *uuid;
14 uuid_le le;
15 uuid_be be;
16};
17
18static const struct test_uuid_data test_uuid_test_data[] = {
19 {
20 .uuid = "c33f4995-3701-450e-9fbf-206a2e98e576",
21 .le = UUID_LE(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
22 .be = UUID_BE(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
23 },
24 {
25 .uuid = "64b4371c-77c1-48f9-8221-29f054fc023b",
26 .le = UUID_LE(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
27 .be = UUID_BE(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
28 },
29 {
30 .uuid = "0cb4ddff-a545-4401-9d06-688af53e7f84",
31 .le = UUID_LE(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
32 .be = UUID_BE(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
33 },
34};
35
36static const char * const test_uuid_wrong_data[] = {
37 "c33f4995-3701-450e-9fbf206a2e98e576 ", /* no hyphen(s) */
38 "64b4371c-77c1-48f9-8221-29f054XX023b", /* invalid character(s) */
39 "0cb4ddff-a545-4401-9d06-688af53e", /* not enough data */
40};
41
42static unsigned total_tests __initdata;
43static unsigned failed_tests __initdata;
44
45static void __init test_uuid_failed(const char *prefix, bool wrong, bool be,
46 const char *data, const char *actual)
47{
48 pr_err("%s test #%u %s %s data: '%s'\n",
49 prefix,
50 total_tests,
51 wrong ? "passed on wrong" : "failed on",
52 be ? "BE" : "LE",
53 data);
54 if (actual && *actual)
55 pr_err("%s test #%u actual data: '%s'\n",
56 prefix,
57 total_tests,
58 actual);
59 failed_tests++;
60}
61
62static void __init test_uuid_test(const struct test_uuid_data *data)
63{
64 uuid_le le;
65 uuid_be be;
66 char buf[48];
67
68 /* LE */
69 total_tests++;
70 if (uuid_le_to_bin(data->uuid, &le))
71 test_uuid_failed("conversion", false, false, data->uuid, NULL);
72
73 total_tests++;
74 if (uuid_le_cmp(data->le, le)) {
75 sprintf(buf, "%pUl", &le);
76 test_uuid_failed("cmp", false, false, data->uuid, buf);
77 }
78
79 /* BE */
80 total_tests++;
81 if (uuid_be_to_bin(data->uuid, &be))
82 test_uuid_failed("conversion", false, true, data->uuid, NULL);
83
84 total_tests++;
85 if (uuid_be_cmp(data->be, be)) {
86 sprintf(buf, "%pUb", &be);
87 test_uuid_failed("cmp", false, true, data->uuid, buf);
88 }
89}
90
91static void __init test_uuid_wrong(const char *data)
92{
93 uuid_le le;
94 uuid_be be;
95
96 /* LE */
97 total_tests++;
98 if (!uuid_le_to_bin(data, &le))
99 test_uuid_failed("negative", true, false, data, NULL);
100
101 /* BE */
102 total_tests++;
103 if (!uuid_be_to_bin(data, &be))
104 test_uuid_failed("negative", true, true, data, NULL);
105}
106
107static int __init test_uuid_init(void)
108{
109 unsigned int i;
110
111 for (i = 0; i < ARRAY_SIZE(test_uuid_test_data); i++)
112 test_uuid_test(&test_uuid_test_data[i]);
113
114 for (i = 0; i < ARRAY_SIZE(test_uuid_wrong_data); i++)
115 test_uuid_wrong(test_uuid_wrong_data[i]);
116
117 if (failed_tests == 0)
118 pr_info("all %u tests passed\n", total_tests);
119 else
120 pr_err("failed %u out of %u tests\n", failed_tests, total_tests);
121
122 return failed_tests ? -EINVAL : 0;
123}
124module_init(test_uuid_init);
125
126static void __exit test_uuid_exit(void)
127{
128 /* do nothing */
129}
130module_exit(test_uuid_exit);
131
132MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
133MODULE_LICENSE("Dual BSD/GPL");
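
Once the Kconfig and Makefile hooks below are in place, the suite runs by enabling CONFIG_TEST_UUID (typically as a module) and loading it: module_init() exercises every positive and negative case, and the init call returns -EINVAL if any case failed, so a regression is visible both in the module load status and in the pr_err() output.
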
diff --git a/lib/uuid.c b/lib/uuid.c
index e116ae5fa00f..37687af77ff8 100644
--- a/lib/uuid.c
+++ b/lib/uuid.c
@@ -106,8 +106,8 @@ static int __uuid_to_bin(const char *uuid, __u8 b[16], const u8 ei[16])
106 return -EINVAL; 106 return -EINVAL;
107 107
108 for (i = 0; i < 16; i++) { 108 for (i = 0; i < 16; i++) {
109 int hi = hex_to_bin(uuid[si[i]] + 0); 109 int hi = hex_to_bin(uuid[si[i] + 0]);
110 int lo = hex_to_bin(uuid[si[i]] + 1); 110 int lo = hex_to_bin(uuid[si[i] + 1]);
111 111
112 b[ei[i]] = (hi << 4) | lo; 112 b[ei[i]] = (hi << 4) | lo;
113 } 113 }
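
The lib/uuid.c fix is a misplaced bracket: `uuid[si[i]] + 1` adds 1 to the *character* read from position si[i], while the intended `uuid[si[i] + 1]` reads the *next character* of the hex pair, so the low nibble of every byte was parsed from a fabricated character. A worked demonstration:

#include <stdio.h>

static int hex_to_bin(char ch)
{
	if (ch >= '0' && ch <= '9') return ch - '0';
	if (ch >= 'a' && ch <= 'f') return ch - 'a' + 10;
	return -1;
}

int main(void)
{
	const char *uuid = "c33f4995-...";
	int i = 0;   /* byte 0 lives at string offsets 0 and 1 */

	/* Buggy: '+ 1' bumps the char 'c' to 'd' instead of the index. */
	printf("%d %d\n", hex_to_bin(uuid[i] + 0), hex_to_bin(uuid[i] + 1));
	/* Fixed: reads 'c' then '3', giving the intended byte 0xc3. */
	printf("%d %d\n", hex_to_bin(uuid[i + 0]), hex_to_bin(uuid[i + 1]));
	return 0;
}
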
diff --git a/mm/compaction.c b/mm/compaction.c
index 1427366ad673..79bfe0e06907 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -441,25 +441,23 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
441 441
442 /* Found a free page, break it into order-0 pages */ 442 /* Found a free page, break it into order-0 pages */
443 isolated = split_free_page(page); 443 isolated = split_free_page(page);
444 if (!isolated)
445 break;
446
444 total_isolated += isolated; 447 total_isolated += isolated;
448 cc->nr_freepages += isolated;
445 for (i = 0; i < isolated; i++) { 449 for (i = 0; i < isolated; i++) {
446 list_add(&page->lru, freelist); 450 list_add(&page->lru, freelist);
447 page++; 451 page++;
448 } 452 }
449 453 if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
450 /* If a page was split, advance to the end of it */ 454 blockpfn += isolated;
451 if (isolated) { 455 break;
452 cc->nr_freepages += isolated;
453 if (!strict &&
454 cc->nr_migratepages <= cc->nr_freepages) {
455 blockpfn += isolated;
456 break;
457 }
458
459 blockpfn += isolated - 1;
460 cursor += isolated - 1;
461 continue;
462 } 456 }
457 /* Advance to the end of split page */
458 blockpfn += isolated - 1;
459 cursor += isolated - 1;
460 continue;
463 461
464isolate_fail: 462isolate_fail:
465 if (strict) 463 if (strict)
@@ -469,6 +467,9 @@ isolate_fail:
469 467
470 } 468 }
471 469
470 if (locked)
471 spin_unlock_irqrestore(&cc->zone->lock, flags);
472
472 /* 473 /*
473 * There is a tiny chance that we have read bogus compound_order(), 474 * There is a tiny chance that we have read bogus compound_order(),
474 * so be careful to not go outside of the pageblock. 475 * so be careful to not go outside of the pageblock.
@@ -490,9 +491,6 @@ isolate_fail:
490 if (strict && blockpfn < end_pfn) 491 if (strict && blockpfn < end_pfn)
491 total_isolated = 0; 492 total_isolated = 0;
492 493
493 if (locked)
494 spin_unlock_irqrestore(&cc->zone->lock, flags);
495
496 /* Update the pageblock-skip if the whole pageblock was scanned */ 494 /* Update the pageblock-skip if the whole pageblock was scanned */
497 if (blockpfn == end_pfn) 495 if (blockpfn == end_pfn)
498 update_pageblock_skip(cc, valid_page, total_isolated, false); 496 update_pageblock_skip(cc, valid_page, total_isolated, false);
@@ -1011,6 +1009,7 @@ static void isolate_freepages(struct compact_control *cc)
1011 block_end_pfn = block_start_pfn, 1009 block_end_pfn = block_start_pfn,
1012 block_start_pfn -= pageblock_nr_pages, 1010 block_start_pfn -= pageblock_nr_pages,
1013 isolate_start_pfn = block_start_pfn) { 1011 isolate_start_pfn = block_start_pfn) {
1012 unsigned long isolated;
1014 1013
1015 /* 1014 /*
1016 * This can iterate a massively long zone without finding any 1015 * This can iterate a massively long zone without finding any
@@ -1035,8 +1034,12 @@ static void isolate_freepages(struct compact_control *cc)
1035 continue; 1034 continue;
1036 1035
1037 /* Found a block suitable for isolating free pages from. */ 1036 /* Found a block suitable for isolating free pages from. */
1038 isolate_freepages_block(cc, &isolate_start_pfn, 1037 isolated = isolate_freepages_block(cc, &isolate_start_pfn,
1039 block_end_pfn, freelist, false); 1038 block_end_pfn, freelist, false);
1039 /* If isolation failed early, do not continue needlessly */
1040 if (!isolated && isolate_start_pfn < block_end_pfn &&
1041 cc->nr_migratepages > cc->nr_freepages)
1042 break;
1040 1043
1041 /* 1044 /*
1042 * If we isolated enough freepages, or aborted due to async 1045 * If we isolated enough freepages, or aborted due to async
diff --git a/mm/fadvise.c b/mm/fadvise.c
index b8024fa7101d..6c707bfe02fd 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -126,6 +126,17 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
126 */ 126 */
127 start_index = (offset+(PAGE_SIZE-1)) >> PAGE_SHIFT; 127 start_index = (offset+(PAGE_SIZE-1)) >> PAGE_SHIFT;
128 end_index = (endbyte >> PAGE_SHIFT); 128 end_index = (endbyte >> PAGE_SHIFT);
129 if ((endbyte & ~PAGE_MASK) != ~PAGE_MASK) {
130 /* First page is tricky as 0 - 1 = -1, but pgoff_t
131 * is unsigned, so the end_index >= start_index
132 * check below would be true and we'll discard the whole
133 * file cache which is not what was asked.
134 */
135 if (end_index == 0)
136 break;
137
138 end_index--;
139 }
129 140
130 if (end_index >= start_index) { 141 if (end_index >= start_index) {
131 unsigned long count = invalidate_mapping_pages(mapping, 142 unsigned long count = invalidate_mapping_pages(mapping,
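
The fadvise64_64 hunk exists because pgoff_t is unsigned: when the range ends inside page 0, end_index is 0 and an unconditional decrement would wrap to the maximum page offset, making the end_index >= start_index check succeed and invalidating the whole file's cache instead of nothing. The wrap in miniature:

#include <stdio.h>

int main(void)
{
	unsigned long end_index = 0;   /* pgoff_t is unsigned long */

	end_index--;                   /* 0 - 1 wraps, never negative */
	printf("%lu\n", end_index);    /* 18446744073709551615 on LP64 */

	/* Hence the patch: if (end_index == 0) break; before end_index--. */
	return 0;
}
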
diff --git a/mm/filemap.c b/mm/filemap.c
index 00ae878b2a38..20f3b1f33f0e 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2186,7 +2186,7 @@ repeat:
2186 if (file->f_ra.mmap_miss > 0) 2186 if (file->f_ra.mmap_miss > 0)
2187 file->f_ra.mmap_miss--; 2187 file->f_ra.mmap_miss--;
2188 addr = address + (page->index - vmf->pgoff) * PAGE_SIZE; 2188 addr = address + (page->index - vmf->pgoff) * PAGE_SIZE;
2189 do_set_pte(vma, addr, page, pte, false, false, true); 2189 do_set_pte(vma, addr, page, pte, false, false);
2190 unlock_page(page); 2190 unlock_page(page);
2191 goto next; 2191 goto next;
2192unlock: 2192unlock:
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index d26162e81fea..c1f3c0be150a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -832,8 +832,27 @@ static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
832 * Only the process that called mmap() has reserves for 832 * Only the process that called mmap() has reserves for
833 * private mappings. 833 * private mappings.
834 */ 834 */
835 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) 835 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
836 return true; 836 /*
837 * Like the shared case above, a hole punch or truncate
838 * could have been performed on the private mapping.
839 * Examine the value of chg to determine if reserves
840 * actually exist or were previously consumed.
841 * Very Subtle - The value of chg comes from a previous
842 * call to vma_needs_reserves(). The reserve map for
843 * private mappings has different (opposite) semantics
844 * than that of shared mappings. vma_needs_reserves()
845 * has already taken this difference in semantics into
846 * account. Therefore, the meaning of chg is the same
847 * as in the shared case above. Code could easily be
848 * combined, but keeping it separate draws attention to
849 * subtle differences.
850 */
851 if (chg)
852 return false;
853 else
854 return true;
855 }
837 856
838 return false; 857 return false;
839} 858}
@@ -1011,6 +1030,7 @@ static void destroy_compound_gigantic_page(struct page *page,
1011 int nr_pages = 1 << order; 1030 int nr_pages = 1 << order;
1012 struct page *p = page + 1; 1031 struct page *p = page + 1;
1013 1032
1033 atomic_set(compound_mapcount_ptr(page), 0);
1014 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) { 1034 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1015 clear_compound_head(p); 1035 clear_compound_head(p);
1016 set_page_refcounted(p); 1036 set_page_refcounted(p);
@@ -1816,6 +1836,25 @@ static long __vma_reservation_common(struct hstate *h,
1816 1836
1817 if (vma->vm_flags & VM_MAYSHARE) 1837 if (vma->vm_flags & VM_MAYSHARE)
1818 return ret; 1838 return ret;
1839 else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
1840 /*
1841 * In most cases, reserves always exist for private mappings.
1842 * However, a file associated with mapping could have been
1843 * hole punched or truncated after reserves were consumed.
 1844 * A subsequent fault on such a range will not use reserves.
1845 * Subtle - The reserve map for private mappings has the
1846 * opposite meaning than that of shared mappings. If NO
1847 * entry is in the reserve map, it means a reservation exists.
1848 * If an entry exists in the reserve map, it means the
1849 * reservation has already been consumed. As a result, the
1850 * return value of this routine is the opposite of the
1851 * value returned from reserve map manipulation routines above.
1852 */
1853 if (ret)
1854 return 0;
1855 else
1856 return 1;
1857 }
1819 else 1858 else
1820 return ret < 0 ? ret : 0; 1859 return ret < 0 ? ret : 0;
1821} 1860}
@@ -4190,7 +4229,6 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4190 if (saddr) { 4229 if (saddr) {
4191 spte = huge_pte_offset(svma->vm_mm, saddr); 4230 spte = huge_pte_offset(svma->vm_mm, saddr);
4192 if (spte) { 4231 if (spte) {
4193 mm_inc_nr_pmds(mm);
4194 get_page(virt_to_page(spte)); 4232 get_page(virt_to_page(spte));
4195 break; 4233 break;
4196 } 4234 }
@@ -4205,9 +4243,9 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4205 if (pud_none(*pud)) { 4243 if (pud_none(*pud)) {
4206 pud_populate(mm, pud, 4244 pud_populate(mm, pud,
4207 (pmd_t *)((unsigned long)spte & PAGE_MASK)); 4245 (pmd_t *)((unsigned long)spte & PAGE_MASK));
4246 mm_inc_nr_pmds(mm);
4208 } else { 4247 } else {
4209 put_page(virt_to_page(spte)); 4248 put_page(virt_to_page(spte));
4210 mm_inc_nr_pmds(mm);
4211 } 4249 }
4212 spin_unlock(ptl); 4250 spin_unlock(ptl);
4213out: 4251out:
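
Both hugetlb hunks hinge on the same inversion spelled out in their comments: a private mapping's reserve map records reservations that were already consumed, so an existing map entry means no reserve remains. That is why __vma_reservation_common() returns the opposite of the raw map result for HPAGE_RESV_OWNER vmas, and why vma_has_reserves() can afterwards treat chg uniformly with the shared case: chg == 0 means a reserve still exists, chg != 0 means a hole punch or truncate consumed it.
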
diff --git a/mm/internal.h b/mm/internal.h
index a37e5b6f9d25..2524ec880e24 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -24,7 +24,8 @@
24 */ 24 */
25#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\ 25#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
26 __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\ 26 __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
27 __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC) 27 __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
28 __GFP_ATOMIC)
28 29
29/* The GFP flags allowed during early boot */ 30/* The GFP flags allowed during early boot */
30#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS)) 31#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 18b6a2b8d183..6845f9294696 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -508,7 +508,7 @@ void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
508 kasan_kmalloc(cache, object, cache->object_size, flags); 508 kasan_kmalloc(cache, object, cache->object_size, flags);
509} 509}
510 510
511void kasan_poison_slab_free(struct kmem_cache *cache, void *object) 511static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
512{ 512{
513 unsigned long size = cache->object_size; 513 unsigned long size = cache->object_size;
514 unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE); 514 unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
@@ -626,7 +626,7 @@ void kasan_krealloc(const void *object, size_t size, gfp_t flags)
626 kasan_kmalloc(page->slab_cache, object, size, flags); 626 kasan_kmalloc(page->slab_cache, object, size, flags);
627} 627}
628 628
629void kasan_kfree(void *ptr) 629void kasan_poison_kfree(void *ptr)
630{ 630{
631 struct page *page; 631 struct page *page;
632 632
@@ -636,7 +636,7 @@ void kasan_kfree(void *ptr)
636 kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page), 636 kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
637 KASAN_FREE_PAGE); 637 KASAN_FREE_PAGE);
638 else 638 else
639 kasan_slab_free(page->slab_cache, ptr); 639 kasan_poison_slab_free(page->slab_cache, ptr);
640} 640}
641 641
642void kasan_kfree_large(const void *ptr) 642void kasan_kfree_large(const void *ptr)
@@ -763,8 +763,8 @@ static int kasan_mem_notifier(struct notifier_block *nb,
763 763
764static int __init kasan_memhotplug_init(void) 764static int __init kasan_memhotplug_init(void)
765{ 765{
766 pr_err("WARNING: KASAN doesn't support memory hot-add\n"); 766 pr_info("WARNING: KASAN doesn't support memory hot-add\n");
767 pr_err("Memory hot-add will be disabled\n"); 767 pr_info("Memory hot-add will be disabled\n");
768 768
769 hotplug_memory_notifier(kasan_mem_notifier, 0); 769 hotplug_memory_notifier(kasan_mem_notifier, 0);
770 770
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index e6429926e957..04320d3adbef 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -307,8 +307,10 @@ static void hex_dump_object(struct seq_file *seq,
307 len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE); 307 len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);
308 308
309 seq_printf(seq, " hex dump (first %zu bytes):\n", len); 309 seq_printf(seq, " hex dump (first %zu bytes):\n", len);
310 kasan_disable_current();
310 seq_hex_dump(seq, " ", DUMP_PREFIX_NONE, HEX_ROW_SIZE, 311 seq_hex_dump(seq, " ", DUMP_PREFIX_NONE, HEX_ROW_SIZE,
311 HEX_GROUP_SIZE, ptr, len, HEX_ASCII); 312 HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
313 kasan_enable_current();
312} 314}
313 315
314/* 316/*
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 925b431f3f03..ac8664db3823 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1608,7 +1608,7 @@ static void memcg_oom_recover(struct mem_cgroup *memcg)
1608 1608
1609static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order) 1609static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1610{ 1610{
1611 if (!current->memcg_may_oom || current->memcg_in_oom) 1611 if (!current->memcg_may_oom)
1612 return; 1612 return;
1613 /* 1613 /*
1614 * We are in the middle of the charge context here, so we 1614 * We are in the middle of the charge context here, so we
@@ -2896,6 +2896,7 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg)
2896 * ordering is imposed by list_lru_node->lock taken by 2896 * ordering is imposed by list_lru_node->lock taken by
2897 * memcg_drain_all_list_lrus(). 2897 * memcg_drain_all_list_lrus().
2898 */ 2898 */
2899 rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
2899 css_for_each_descendant_pre(css, &memcg->css) { 2900 css_for_each_descendant_pre(css, &memcg->css) {
2900 child = mem_cgroup_from_css(css); 2901 child = mem_cgroup_from_css(css);
2901 BUG_ON(child->kmemcg_id != kmemcg_id); 2902 BUG_ON(child->kmemcg_id != kmemcg_id);
@@ -2903,6 +2904,8 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg)
2903 if (!memcg->use_hierarchy) 2904 if (!memcg->use_hierarchy)
2904 break; 2905 break;
2905 } 2906 }
2907 rcu_read_unlock();
2908
2906 memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id); 2909 memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
2907 2910
2908 memcg_free_cache_id(kmemcg_id); 2911 memcg_free_cache_id(kmemcg_id);
@@ -4200,7 +4203,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
4200 return &memcg->css; 4203 return &memcg->css;
4201fail: 4204fail:
4202 mem_cgroup_free(memcg); 4205 mem_cgroup_free(memcg);
4203 return NULL; 4206 return ERR_PTR(-ENOMEM);
4204} 4207}
4205 4208
4206static int 4209static int
@@ -5541,6 +5544,7 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
5541 struct mem_cgroup *memcg; 5544 struct mem_cgroup *memcg;
5542 unsigned int nr_pages; 5545 unsigned int nr_pages;
5543 bool compound; 5546 bool compound;
5547 unsigned long flags;
5544 5548
5545 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage); 5549 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
5546 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); 5550 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
@@ -5571,10 +5575,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
5571 5575
5572 commit_charge(newpage, memcg, false); 5576 commit_charge(newpage, memcg, false);
5573 5577
5574 local_irq_disable(); 5578 local_irq_save(flags);
5575 mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages); 5579 mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
5576 memcg_check_events(memcg, newpage); 5580 memcg_check_events(memcg, newpage);
5577 local_irq_enable(); 5581 local_irq_restore(flags);
5578} 5582}
5579 5583
5580DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); 5584DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
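
The mem_cgroup_migrate() change swaps local_irq_disable()/enable() for the save/restore pair because the function can be reached with interrupts already off: a bare enable would unconditionally turn them back on behind the caller's back. A toy model of why restore is the safe form (the real primitives save a per-CPU flags word, not a pointer; this is purely illustrative):

#include <stdio.h>

static int irqs_enabled = 1;   /* toy model of the CPU's IRQ flag */

static void toy_irq_save(unsigned long *flags)
{
	*flags = irqs_enabled;
	irqs_enabled = 0;
}

static void toy_irq_restore(unsigned long flags)
{
	irqs_enabled = (int)flags;   /* back to the caller's state */
}

static void migrate_stats(void)
{
	unsigned long flags;

	toy_irq_save(&flags);
	/* ... update counters atomically w.r.t. this CPU ... */
	toy_irq_restore(flags);
}

int main(void)
{
	irqs_enabled = 0;   /* caller already has IRQs off */
	migrate_stats();
	printf("%d\n", irqs_enabled);   /* still 0: state preserved */
	return 0;
}
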
diff --git a/mm/memory.c b/mm/memory.c
index 15322b73636b..cd1f29e4897e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2877,7 +2877,7 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
2877 * vm_ops->map_pages. 2877 * vm_ops->map_pages.
2878 */ 2878 */
2879void do_set_pte(struct vm_area_struct *vma, unsigned long address, 2879void do_set_pte(struct vm_area_struct *vma, unsigned long address,
2880 struct page *page, pte_t *pte, bool write, bool anon, bool old) 2880 struct page *page, pte_t *pte, bool write, bool anon)
2881{ 2881{
2882 pte_t entry; 2882 pte_t entry;
2883 2883
@@ -2885,8 +2885,6 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
2885 entry = mk_pte(page, vma->vm_page_prot); 2885 entry = mk_pte(page, vma->vm_page_prot);
2886 if (write) 2886 if (write)
2887 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 2887 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2888 if (old)
2889 entry = pte_mkold(entry);
2890 if (anon) { 2888 if (anon) {
2891 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); 2889 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
2892 page_add_new_anon_rmap(page, vma, address, false); 2890 page_add_new_anon_rmap(page, vma, address, false);
@@ -2900,16 +2898,8 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
2900 update_mmu_cache(vma, address, pte); 2898 update_mmu_cache(vma, address, pte);
2901} 2899}
2902 2900
2903/*
2904 * If architecture emulates "accessed" or "young" bit without HW support,
2905 * there is no much gain with fault_around.
2906 */
2907static unsigned long fault_around_bytes __read_mostly = 2901static unsigned long fault_around_bytes __read_mostly =
2908#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
2909 PAGE_SIZE;
2910#else
2911 rounddown_pow_of_two(65536); 2902 rounddown_pow_of_two(65536);
2912#endif
2913 2903
2914#ifdef CONFIG_DEBUG_FS 2904#ifdef CONFIG_DEBUG_FS
2915static int fault_around_bytes_get(void *data, u64 *val) 2905static int fault_around_bytes_get(void *data, u64 *val)
@@ -3032,20 +3022,9 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3032 */ 3022 */
3033 if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) { 3023 if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
3034 pte = pte_offset_map_lock(mm, pmd, address, &ptl); 3024 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
3035 if (!pte_same(*pte, orig_pte))
3036 goto unlock_out;
3037 do_fault_around(vma, address, pte, pgoff, flags); 3025 do_fault_around(vma, address, pte, pgoff, flags);
3038 /* Check if the fault is handled by faultaround */ 3026 if (!pte_same(*pte, orig_pte))
3039 if (!pte_same(*pte, orig_pte)) {
3040 /*
3041 * Faultaround produce old pte, but the pte we've
3042 * handler fault for should be young.
3043 */
3044 pte_t entry = pte_mkyoung(*pte);
3045 if (ptep_set_access_flags(vma, address, pte, entry, 0))
3046 update_mmu_cache(vma, address, pte);
3047 goto unlock_out; 3027 goto unlock_out;
3048 }
3049 pte_unmap_unlock(pte, ptl); 3028 pte_unmap_unlock(pte, ptl);
3050 } 3029 }
3051 3030
@@ -3060,7 +3039,7 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3060 put_page(fault_page); 3039 put_page(fault_page);
3061 return ret; 3040 return ret;
3062 } 3041 }
3063 do_set_pte(vma, address, fault_page, pte, false, false, false); 3042 do_set_pte(vma, address, fault_page, pte, false, false);
3064 unlock_page(fault_page); 3043 unlock_page(fault_page);
3065unlock_out: 3044unlock_out:
3066 pte_unmap_unlock(pte, ptl); 3045 pte_unmap_unlock(pte, ptl);
@@ -3111,7 +3090,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3111 } 3090 }
3112 goto uncharge_out; 3091 goto uncharge_out;
3113 } 3092 }
3114 do_set_pte(vma, address, new_page, pte, true, true, false); 3093 do_set_pte(vma, address, new_page, pte, true, true);
3115 mem_cgroup_commit_charge(new_page, memcg, false, false); 3094 mem_cgroup_commit_charge(new_page, memcg, false, false);
3116 lru_cache_add_active_or_unevictable(new_page, vma); 3095 lru_cache_add_active_or_unevictable(new_page, vma);
3117 pte_unmap_unlock(pte, ptl); 3096 pte_unmap_unlock(pte, ptl);
@@ -3164,7 +3143,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3164 put_page(fault_page); 3143 put_page(fault_page);
3165 return ret; 3144 return ret;
3166 } 3145 }
3167 do_set_pte(vma, address, fault_page, pte, true, false, false); 3146 do_set_pte(vma, address, fault_page, pte, true, false);
3168 pte_unmap_unlock(pte, ptl); 3147 pte_unmap_unlock(pte, ptl);
3169 3148
3170 if (set_page_dirty(fault_page)) 3149 if (set_page_dirty(fault_page))
diff --git a/mm/mempool.c b/mm/mempool.c
index 9e075f829d0d..8f65464da5de 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -104,20 +104,16 @@ static inline void poison_element(mempool_t *pool, void *element)
104 104
105static void kasan_poison_element(mempool_t *pool, void *element) 105static void kasan_poison_element(mempool_t *pool, void *element)
106{ 106{
107 if (pool->alloc == mempool_alloc_slab) 107 if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
108 kasan_poison_slab_free(pool->pool_data, element); 108 kasan_poison_kfree(element);
109 if (pool->alloc == mempool_kmalloc)
110 kasan_kfree(element);
111 if (pool->alloc == mempool_alloc_pages) 109 if (pool->alloc == mempool_alloc_pages)
112 kasan_free_pages(element, (unsigned long)pool->pool_data); 110 kasan_free_pages(element, (unsigned long)pool->pool_data);
113} 111}
114 112
115static void kasan_unpoison_element(mempool_t *pool, void *element, gfp_t flags) 113static void kasan_unpoison_element(mempool_t *pool, void *element, gfp_t flags)
116{ 114{
117 if (pool->alloc == mempool_alloc_slab) 115 if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
118 kasan_slab_alloc(pool->pool_data, element, flags); 116 kasan_unpoison_slab(element);
119 if (pool->alloc == mempool_kmalloc)
120 kasan_krealloc(element, (size_t)pool->pool_data, flags);
121 if (pool->alloc == mempool_alloc_pages) 117 if (pool->alloc == mempool_alloc_pages)
122 kasan_alloc_pages(element, (unsigned long)pool->pool_data); 118 kasan_alloc_pages(element, (unsigned long)pool->pool_data);
123} 119}
diff --git a/mm/migrate.c b/mm/migrate.c
index 9baf41c877ff..bd3fdc202e8b 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -431,6 +431,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
431 431
432 return MIGRATEPAGE_SUCCESS; 432 return MIGRATEPAGE_SUCCESS;
433} 433}
434EXPORT_SYMBOL(migrate_page_move_mapping);
434 435
435/* 436/*
436 * The expected number of remaining references is the same as that 437 * The expected number of remaining references is the same as that
@@ -586,6 +587,7 @@ void migrate_page_copy(struct page *newpage, struct page *page)
586 587
587 mem_cgroup_migrate(page, newpage); 588 mem_cgroup_migrate(page, newpage);
588} 589}
590EXPORT_SYMBOL(migrate_page_copy);
589 591
590/************************************************************ 592/************************************************************
591 * Migration functions 593 * Migration functions
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index dfb1ab61fb23..ddf74487f848 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -474,13 +474,8 @@ static bool __oom_reap_task(struct task_struct *tsk)
474 p = find_lock_task_mm(tsk); 474 p = find_lock_task_mm(tsk);
475 if (!p) 475 if (!p)
476 goto unlock_oom; 476 goto unlock_oom;
477
478 mm = p->mm; 477 mm = p->mm;
479 if (!atomic_inc_not_zero(&mm->mm_users)) { 478 atomic_inc(&mm->mm_users);
480 task_unlock(p);
481 goto unlock_oom;
482 }
483
484 task_unlock(p); 479 task_unlock(p);
485 480
486 if (!down_read_trylock(&mm->mmap_sem)) { 481 if (!down_read_trylock(&mm->mmap_sem)) {
@@ -625,8 +620,6 @@ void try_oom_reaper(struct task_struct *tsk)
625 if (atomic_read(&mm->mm_users) > 1) { 620 if (atomic_read(&mm->mm_users) > 1) {
626 rcu_read_lock(); 621 rcu_read_lock();
627 for_each_process(p) { 622 for_each_process(p) {
628 bool exiting;
629
630 if (!process_shares_mm(p, mm)) 623 if (!process_shares_mm(p, mm))
631 continue; 624 continue;
632 if (fatal_signal_pending(p)) 625 if (fatal_signal_pending(p))
@@ -636,10 +629,7 @@ void try_oom_reaper(struct task_struct *tsk)
636 * If the task is exiting make sure the whole thread group 629 * If the task is exiting make sure the whole thread group
 637 * is exiting and cannot access mm anymore. 630 * is exiting and cannot access mm anymore.
638 */ 631 */
639 spin_lock_irq(&p->sighand->siglock); 632 if (signal_group_exit(p->signal))
640 exiting = signal_group_exit(p->signal);
641 spin_unlock_irq(&p->sighand->siglock);
642 if (exiting)
643 continue; 633 continue;
644 634
645 /* Give up */ 635 /* Give up */
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index b9956fdee8f5..e2481949494c 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -373,8 +373,9 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
373 struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc); 373 struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc);
374 unsigned long bytes = vm_dirty_bytes; 374 unsigned long bytes = vm_dirty_bytes;
375 unsigned long bg_bytes = dirty_background_bytes; 375 unsigned long bg_bytes = dirty_background_bytes;
376 unsigned long ratio = vm_dirty_ratio; 376 /* convert ratios to per-PAGE_SIZE for higher precision */
377 unsigned long bg_ratio = dirty_background_ratio; 377 unsigned long ratio = (vm_dirty_ratio * PAGE_SIZE) / 100;
378 unsigned long bg_ratio = (dirty_background_ratio * PAGE_SIZE) / 100;
378 unsigned long thresh; 379 unsigned long thresh;
379 unsigned long bg_thresh; 380 unsigned long bg_thresh;
380 struct task_struct *tsk; 381 struct task_struct *tsk;
@@ -386,26 +387,28 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
386 /* 387 /*
387 * The byte settings can't be applied directly to memcg 388 * The byte settings can't be applied directly to memcg
388 * domains. Convert them to ratios by scaling against 389 * domains. Convert them to ratios by scaling against
389 * globally available memory. 390 * globally available memory. As the ratios are in
391 * per-PAGE_SIZE, they can be obtained by dividing bytes by
392 * number of pages.
390 */ 393 */
391 if (bytes) 394 if (bytes)
392 ratio = min(DIV_ROUND_UP(bytes, PAGE_SIZE) * 100 / 395 ratio = min(DIV_ROUND_UP(bytes, global_avail),
393 global_avail, 100UL); 396 PAGE_SIZE);
394 if (bg_bytes) 397 if (bg_bytes)
395 bg_ratio = min(DIV_ROUND_UP(bg_bytes, PAGE_SIZE) * 100 / 398 bg_ratio = min(DIV_ROUND_UP(bg_bytes, global_avail),
396 global_avail, 100UL); 399 PAGE_SIZE);
397 bytes = bg_bytes = 0; 400 bytes = bg_bytes = 0;
398 } 401 }
399 402
400 if (bytes) 403 if (bytes)
401 thresh = DIV_ROUND_UP(bytes, PAGE_SIZE); 404 thresh = DIV_ROUND_UP(bytes, PAGE_SIZE);
402 else 405 else
403 thresh = (ratio * available_memory) / 100; 406 thresh = (ratio * available_memory) / PAGE_SIZE;
404 407
405 if (bg_bytes) 408 if (bg_bytes)
406 bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE); 409 bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE);
407 else 410 else
408 bg_thresh = (bg_ratio * available_memory) / 100; 411 bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
409 412
410 if (bg_thresh >= thresh) 413 if (bg_thresh >= thresh)
411 bg_thresh = thresh / 2; 414 bg_thresh = thresh / 2;
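
The domain_dirty_limits() change keeps the ratios in units of 1/PAGE_SIZE instead of percent, roughly 41x finer granularity with 4 KiB pages, and it lets dirty_bytes settings far below 1% of memory survive the conversion instead of rounding down to 0. Worked numbers for the ratio path (values chosen purely for illustration):

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096, avail = 1000000;   /* pages */
	unsigned long vm_dirty_ratio = 20;                 /* percent */

	/* Old: percent precision. */
	unsigned long old_thresh = vm_dirty_ratio * avail / 100;
	/* New: per-PAGE_SIZE precision: 20 * 4096 / 100 = 819. */
	unsigned long ratio = vm_dirty_ratio * page_size / 100;
	unsigned long new_thresh = ratio * avail / page_size;

	printf("%lu vs %lu\n", old_thresh, new_thresh);   /* 200000 vs 199951 */
	return 0;
}
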
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f8f3bfc435ee..6903b695ebae 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -656,6 +656,9 @@ static inline void set_page_guard(struct zone *zone, struct page *page,
656 return; 656 return;
657 657
658 page_ext = lookup_page_ext(page); 658 page_ext = lookup_page_ext(page);
659 if (unlikely(!page_ext))
660 return;
661
659 __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags); 662 __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
660 663
661 INIT_LIST_HEAD(&page->lru); 664 INIT_LIST_HEAD(&page->lru);
@@ -673,6 +676,9 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
673 return; 676 return;
674 677
675 page_ext = lookup_page_ext(page); 678 page_ext = lookup_page_ext(page);
679 if (unlikely(!page_ext))
680 return;
681
676 __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags); 682 __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
677 683
678 set_page_private(page, 0); 684 set_page_private(page, 0);
@@ -2609,11 +2615,12 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
2609 page = list_last_entry(list, struct page, lru); 2615 page = list_last_entry(list, struct page, lru);
2610 else 2616 else
2611 page = list_first_entry(list, struct page, lru); 2617 page = list_first_entry(list, struct page, lru);
2612 } while (page && check_new_pcp(page));
2613 2618
2614 __dec_zone_state(zone, NR_ALLOC_BATCH); 2619 __dec_zone_state(zone, NR_ALLOC_BATCH);
2615 list_del(&page->lru); 2620 list_del(&page->lru);
2616 pcp->count--; 2621 pcp->count--;
2622
2623 } while (check_new_pcp(page));
2617 } else { 2624 } else {
2618 /* 2625 /*
2619 * We most definitely don't want callers attempting to 2626 * We most definitely don't want callers attempting to
@@ -3023,6 +3030,7 @@ reset_fair:
3023 apply_fair = false; 3030 apply_fair = false;
3024 fair_skipped = false; 3031 fair_skipped = false;
3025 reset_alloc_batches(ac->preferred_zoneref->zone); 3032 reset_alloc_batches(ac->preferred_zoneref->zone);
3033 z = ac->preferred_zoneref;
3026 goto zonelist_scan; 3034 goto zonelist_scan;
3027 } 3035 }
3028 3036
@@ -3596,6 +3604,17 @@ retry:
3596 */ 3604 */
3597 alloc_flags = gfp_to_alloc_flags(gfp_mask); 3605 alloc_flags = gfp_to_alloc_flags(gfp_mask);
3598 3606
3607 /*
3608 * Reset the zonelist iterators if memory policies can be ignored.
3609 * These allocations are high priority and system rather than user
3610 * orientated.
3611 */
3612 if ((alloc_flags & ALLOC_NO_WATERMARKS) || !(alloc_flags & ALLOC_CPUSET)) {
3613 ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
3614 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
3615 ac->high_zoneidx, ac->nodemask);
3616 }
3617
3599 /* This is the last chance, in general, before the goto nopage. */ 3618 /* This is the last chance, in general, before the goto nopage. */
3600 page = get_page_from_freelist(gfp_mask, order, 3619 page = get_page_from_freelist(gfp_mask, order,
3601 alloc_flags & ~ALLOC_NO_WATERMARKS, ac); 3620 alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
@@ -3604,12 +3623,6 @@ retry:
3604 3623
3605 /* Allocate without watermarks if the context allows */ 3624 /* Allocate without watermarks if the context allows */
3606 if (alloc_flags & ALLOC_NO_WATERMARKS) { 3625 if (alloc_flags & ALLOC_NO_WATERMARKS) {
3607 /*
3608 * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds
3609 * the allocation is high priority and these type of
3610 * allocations are system rather than user orientated
3611 */
3612 ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
3613 page = get_page_from_freelist(gfp_mask, order, 3626 page = get_page_from_freelist(gfp_mask, order,
3614 ALLOC_NO_WATERMARKS, ac); 3627 ALLOC_NO_WATERMARKS, ac);
3615 if (page) 3628 if (page)
@@ -3808,7 +3821,11 @@ retry_cpuset:
3808 /* Dirty zone balancing only done in the fast path */ 3821 /* Dirty zone balancing only done in the fast path */
3809 ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE); 3822 ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
3810 3823
3811 /* The preferred zone is used for statistics later */ 3824 /*
3825 * The preferred zone is used for statistics but crucially it is
3826 * also used as the starting point for the zonelist iterator. It
3827 * may get reset for allocations that ignore memory policies.
3828 */
3812 ac.preferred_zoneref = first_zones_zonelist(ac.zonelist, 3829 ac.preferred_zoneref = first_zones_zonelist(ac.zonelist,
3813 ac.high_zoneidx, ac.nodemask); 3830 ac.high_zoneidx, ac.nodemask);
3814 if (!ac.preferred_zoneref) { 3831 if (!ac.preferred_zoneref) {
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 792b56da13d8..fedeba88c9cb 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -55,6 +55,8 @@ void __reset_page_owner(struct page *page, unsigned int order)
 
 	for (i = 0; i < (1 << order); i++) {
 		page_ext = lookup_page_ext(page + i);
+		if (unlikely(!page_ext))
+			continue;
 		__clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
 	}
 }
@@ -62,6 +64,7 @@ void __reset_page_owner(struct page *page, unsigned int order)
 void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
 {
 	struct page_ext *page_ext = lookup_page_ext(page);
+
 	struct stack_trace trace = {
 		.nr_entries = 0,
 		.max_entries = ARRAY_SIZE(page_ext->trace_entries),
@@ -69,6 +72,9 @@ void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
 		.skip = 3,
 	};
 
+	if (unlikely(!page_ext))
+		return;
+
 	save_stack_trace(&trace);
 
 	page_ext->order = order;
@@ -82,6 +88,8 @@ void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
 void __set_page_owner_migrate_reason(struct page *page, int reason)
 {
 	struct page_ext *page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
 
 	page_ext->last_migrate_reason = reason;
 }
@@ -89,6 +97,12 @@ void __set_page_owner_migrate_reason(struct page *page, int reason)
 gfp_t __get_page_owner_gfp(struct page *page)
 {
 	struct page_ext *page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		/*
+		 * The caller just returns 0 if no valid gfp
+		 * So return 0 here too.
+		 */
+		return 0;
 
 	return page_ext->gfp_mask;
 }
@@ -99,6 +113,9 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage)
 	struct page_ext *new_ext = lookup_page_ext(newpage);
 	int i;
 
+	if (unlikely(!old_ext || !new_ext))
+		return;
+
 	new_ext->order = old_ext->order;
 	new_ext->gfp_mask = old_ext->gfp_mask;
 	new_ext->nr_entries = old_ext->nr_entries;
@@ -190,8 +207,15 @@ void __dump_page_owner(struct page *page)
 		.nr_entries = page_ext->nr_entries,
 		.entries = &page_ext->trace_entries[0],
 	};
-	gfp_t gfp_mask = page_ext->gfp_mask;
-	int mt = gfpflags_to_migratetype(gfp_mask);
+	gfp_t gfp_mask;
+	int mt;
+
+	if (unlikely(!page_ext)) {
+		pr_alert("There is not page extension available.\n");
+		return;
+	}
+	gfp_mask = page_ext->gfp_mask;
+	mt = gfpflags_to_migratetype(gfp_mask);
 
 	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
 		pr_alert("page_owner info is not active (free page?)\n");
@@ -251,6 +275,8 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 		}
 
 		page_ext = lookup_page_ext(page);
+		if (unlikely(!page_ext))
+			continue;
 
 		/*
 		 * Some pages could be missed by concurrent allocation or free,
@@ -317,6 +343,8 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
 			continue;
 
 		page_ext = lookup_page_ext(page);
+		if (unlikely(!page_ext))
+			continue;
 
 		/* Maybe overraping zone */
 		if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
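All of the page_owner hunks above add the same guard: with sparse memory models the page_ext arrays for a section may not have been allocated yet (early in boot, or for never-onlined ranges), so lookup_page_ext() can legitimately return NULL and every caller has to bail out rather than dereference it. A minimal userspace sketch of the idiom follows; it is illustrative only, and lookup_page_ext_demo()/set_owner_bit() are invented names, not kernel API.

#include <stdio.h>

struct page_ext { unsigned long flags; };

/* may return NULL, just like lookup_page_ext() before init completes */
static struct page_ext *lookup_page_ext_demo(int pfn)
{
	static struct page_ext table[16];

	return (pfn >= 0 && pfn < 16) ? &table[pfn] : NULL;
}

static void set_owner_bit(int pfn)
{
	struct page_ext *ext = lookup_page_ext_demo(pfn);

	if (!ext)		/* mirrors the unlikely(!page_ext) checks */
		return;		/* skip quietly instead of crashing */
	ext->flags |= 1UL;
}

int main(void)
{
	set_owner_bit(3);	/* valid: bit gets set */
	set_owner_bit(99);	/* invalid: guard prevents a NULL dereference */
	puts("ok");
	return 0;
}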
diff --git a/mm/page_poison.c b/mm/page_poison.c
index 1eae5fad2446..2e647c65916b 100644
--- a/mm/page_poison.c
+++ b/mm/page_poison.c
@@ -54,6 +54,9 @@ static inline void set_page_poison(struct page *page)
 	struct page_ext *page_ext;
 
 	page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
+
 	__set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
 }
 
@@ -62,6 +65,9 @@ static inline void clear_page_poison(struct page *page)
 	struct page_ext *page_ext;
 
 	page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
+
 	__clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
 }
 
@@ -70,7 +76,7 @@ bool page_is_poisoned(struct page *page)
 	struct page_ext *page_ext;
 
 	page_ext = lookup_page_ext(page);
-	if (!page_ext)
+	if (unlikely(!page_ext))
 		return false;
 
 	return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
diff --git a/mm/percpu.c b/mm/percpu.c
index 0c59684f1ff2..9903830aaebb 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -112,7 +112,7 @@ struct pcpu_chunk {
 	int			map_used;	/* # of map entries used before the sentry */
 	int			map_alloc;	/* # of map entries allocated */
 	int			*map;		/* allocation map */
-	struct work_struct	map_extend_work;/* async ->map[] extension */
+	struct list_head	map_extend_list;/* on pcpu_map_extend_chunks */
 
 	void			*data;		/* chunk data */
 	int			first_free;	/* no free below this */
@@ -162,10 +162,13 @@ static struct pcpu_chunk *pcpu_reserved_chunk;
 static int pcpu_reserved_chunk_limit;
 
 static DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
-static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop */
+static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */
 
 static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
 
+/* chunks which need their map areas extended, protected by pcpu_lock */
+static LIST_HEAD(pcpu_map_extend_chunks);
+
 /*
 * The number of empty populated pages, protected by pcpu_lock.  The
 * reserved chunk doesn't contribute to the count.
@@ -395,13 +398,19 @@ static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic)
 {
 	int margin, new_alloc;
 
+	lockdep_assert_held(&pcpu_lock);
+
 	if (is_atomic) {
 		margin = 3;
 
 		if (chunk->map_alloc <
-		    chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW &&
-		    pcpu_async_enabled)
-			schedule_work(&chunk->map_extend_work);
+		    chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW) {
+			if (list_empty(&chunk->map_extend_list)) {
+				list_add_tail(&chunk->map_extend_list,
+					      &pcpu_map_extend_chunks);
+				pcpu_schedule_balance_work();
+			}
+		}
 	} else {
 		margin = PCPU_ATOMIC_MAP_MARGIN_HIGH;
 	}
@@ -435,6 +444,8 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
 	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
 	unsigned long flags;
 
+	lockdep_assert_held(&pcpu_alloc_mutex);
+
 	new = pcpu_mem_zalloc(new_size);
 	if (!new)
 		return -ENOMEM;
@@ -467,20 +478,6 @@ out_unlock:
 	return 0;
 }
 
-static void pcpu_map_extend_workfn(struct work_struct *work)
-{
-	struct pcpu_chunk *chunk = container_of(work, struct pcpu_chunk,
-						map_extend_work);
-	int new_alloc;
-
-	spin_lock_irq(&pcpu_lock);
-	new_alloc = pcpu_need_to_extend(chunk, false);
-	spin_unlock_irq(&pcpu_lock);
-
-	if (new_alloc)
-		pcpu_extend_area_map(chunk, new_alloc);
-}
-
 /**
 * pcpu_fit_in_area - try to fit the requested allocation in a candidate area
 * @chunk: chunk the candidate area belongs to
@@ -740,7 +737,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
 	chunk->map_used = 1;
 
 	INIT_LIST_HEAD(&chunk->list);
-	INIT_WORK(&chunk->map_extend_work, pcpu_map_extend_workfn);
+	INIT_LIST_HEAD(&chunk->map_extend_list);
 	chunk->free_size = pcpu_unit_size;
 	chunk->contig_hint = pcpu_unit_size;
 
@@ -895,6 +892,9 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 		return NULL;
 	}
 
+	if (!is_atomic)
+		mutex_lock(&pcpu_alloc_mutex);
+
 	spin_lock_irqsave(&pcpu_lock, flags);
 
 	/* serve reserved allocations from the reserved chunk if available */
@@ -967,12 +967,9 @@ restart:
 	if (is_atomic)
 		goto fail;
 
-	mutex_lock(&pcpu_alloc_mutex);
-
 	if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
 		chunk = pcpu_create_chunk();
 		if (!chunk) {
-			mutex_unlock(&pcpu_alloc_mutex);
 			err = "failed to allocate new chunk";
 			goto fail;
 		}
@@ -983,7 +980,6 @@ restart:
 		spin_lock_irqsave(&pcpu_lock, flags);
 	}
 
-	mutex_unlock(&pcpu_alloc_mutex);
 	goto restart;
 
 area_found:
@@ -993,8 +989,6 @@ area_found:
 	if (!is_atomic) {
 		int page_start, page_end, rs, re;
 
-		mutex_lock(&pcpu_alloc_mutex);
-
 		page_start = PFN_DOWN(off);
 		page_end = PFN_UP(off + size);
 
@@ -1005,7 +999,6 @@ area_found:
 
 			spin_lock_irqsave(&pcpu_lock, flags);
 			if (ret) {
-				mutex_unlock(&pcpu_alloc_mutex);
 				pcpu_free_area(chunk, off, &occ_pages);
 				err = "failed to populate";
 				goto fail_unlock;
@@ -1045,6 +1038,8 @@ fail:
 		/* see the flag handling in pcpu_blance_workfn() */
 		pcpu_atomic_alloc_failed = true;
 		pcpu_schedule_balance_work();
+	} else {
+		mutex_unlock(&pcpu_alloc_mutex);
 	}
 	return NULL;
 }
@@ -1129,6 +1124,7 @@ static void pcpu_balance_workfn(struct work_struct *work)
 		if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
 			continue;
 
+		list_del_init(&chunk->map_extend_list);
 		list_move(&chunk->list, &to_free);
 	}
 
@@ -1146,6 +1142,25 @@ static void pcpu_balance_workfn(struct work_struct *work)
 		pcpu_destroy_chunk(chunk);
 	}
 
+	/* service chunks which requested async area map extension */
+	do {
+		int new_alloc = 0;
+
+		spin_lock_irq(&pcpu_lock);
+
+		chunk = list_first_entry_or_null(&pcpu_map_extend_chunks,
+				struct pcpu_chunk, map_extend_list);
+		if (chunk) {
+			list_del_init(&chunk->map_extend_list);
+			new_alloc = pcpu_need_to_extend(chunk, false);
+		}
+
+		spin_unlock_irq(&pcpu_lock);
+
+		if (new_alloc)
+			pcpu_extend_area_map(chunk, new_alloc);
+	} while (chunk);
+
 	/*
 	 * Ensure there are certain number of free populated pages for
 	 * atomic allocs. Fill up from the most packed so that atomic
@@ -1644,7 +1659,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	 */
 	schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
 	INIT_LIST_HEAD(&schunk->list);
-	INIT_WORK(&schunk->map_extend_work, pcpu_map_extend_workfn);
+	INIT_LIST_HEAD(&schunk->map_extend_list);
 	schunk->base_addr = base_addr;
 	schunk->map = smap;
 	schunk->map_alloc = ARRAY_SIZE(smap);
@@ -1673,7 +1688,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	if (dyn_size) {
 		dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
 		INIT_LIST_HEAD(&dchunk->list);
-		INIT_WORK(&dchunk->map_extend_work, pcpu_map_extend_workfn);
+		INIT_LIST_HEAD(&dchunk->map_extend_list);
 		dchunk->base_addr = base_addr;
 		dchunk->map = dmap;
 		dchunk->map_alloc = ARRAY_SIZE(dmap);
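The percpu rework above replaces one work item per chunk with a single shared list that the existing balance worker drains, so area map extension now happens in one place under pcpu_alloc_mutex. A rough userspace model of the enqueue-once, drain-later pattern; every name here is invented, and the real code does the list manipulation under pcpu_lock rather than lock-free.

#include <stdio.h>
#include <stdbool.h>

struct chunk {
	bool queued;		/* stands in for list_empty(&map_extend_list) */
	struct chunk *next;
};

static struct chunk *pending;	/* stands in for pcpu_map_extend_chunks */

static void request_extend(struct chunk *c)
{
	if (c->queued)		/* already queued: coalesce the request */
		return;
	c->queued = true;
	c->next = pending;
	pending = c;
}

static void balance_work(void)	/* drains the list, one chunk at a time */
{
	while (pending) {
		struct chunk *c = pending;

		pending = c->next;
		c->queued = false;
		printf("extending chunk %p\n", (void *)c);
	}
}

int main(void)
{
	struct chunk a = {0}, b = {0};

	request_extend(&a);
	request_extend(&a);	/* duplicate request is absorbed */
	request_extend(&b);
	balance_work();
	return 0;
}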
diff --git a/mm/shmem.c b/mm/shmem.c
index a36144909b28..24463b67b6ef 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2227,7 +2227,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
 			/* Remove the !PageUptodate pages we added */
 			shmem_undo_range(inode,
 				(loff_t)start << PAGE_SHIFT,
-				(loff_t)index << PAGE_SHIFT, true);
+				((loff_t)index << PAGE_SHIFT) - 1, true);
 			goto undone;
 		}
 
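The shmem one-liner matters because shmem_undo_range() treats its second argument as an inclusive end offset; passing index << PAGE_SHIFT overshot the intended range by one byte into the next page. A tiny demonstration of the arithmetic, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages */

int main(void)
{
	long long start = 2, index = 5;	/* pages 2..4 were added */

	printf("start:   %lld\n", start << PAGE_SHIFT);        /*  8192 */
	/* old bound: first byte of page 5, one byte too far */
	printf("old end: %lld\n", index << PAGE_SHIFT);        /* 20480 */
	/* new bound: last byte of page 4, a proper inclusive end */
	printf("new end: %lld\n", (index << PAGE_SHIFT) - 1);  /* 20479 */
	return 0;
}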
diff --git a/mm/swap.c b/mm/swap.c
index 95916142fc46..90530ff8ed16 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -242,7 +242,7 @@ void rotate_reclaimable_page(struct page *page)
 		get_page(page);
 		local_irq_save(flags);
 		pvec = this_cpu_ptr(&lru_rotate_pvecs);
-		if (!pagevec_add(pvec, page))
+		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_move_tail(pvec);
 		local_irq_restore(flags);
 	}
@@ -296,7 +296,7 @@ void activate_page(struct page *page)
 		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
 
 		get_page(page);
-		if (!pagevec_add(pvec, page))
+		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_lru_move_fn(pvec, __activate_page, NULL);
 		put_cpu_var(activate_page_pvecs);
 	}
@@ -391,9 +391,8 @@ static void __lru_cache_add(struct page *page)
 	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
 
 	get_page(page);
-	if (!pagevec_space(pvec))
+	if (!pagevec_add(pvec, page) || PageCompound(page))
 		__pagevec_lru_add(pvec);
-	pagevec_add(pvec, page);
 	put_cpu_var(lru_add_pvec);
 }
 
@@ -628,7 +627,7 @@ void deactivate_file_page(struct page *page)
 	if (likely(get_page_unless_zero(page))) {
 		struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);
 
-		if (!pagevec_add(pvec, page))
+		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
 		put_cpu_var(lru_deactivate_file_pvecs);
 	}
@@ -648,7 +647,7 @@ void deactivate_page(struct page *page)
 		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
 
 		get_page(page);
-		if (!pagevec_add(pvec, page))
+		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
 		put_cpu_var(lru_deactivate_pvecs);
 	}
@@ -667,6 +666,24 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy)
 
 static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
 
+/*
+ * lru_add_drain_wq is used to do lru_add_drain_all() from a WQ_MEM_RECLAIM
+ * workqueue, aiding in getting memory freed.
+ */
+static struct workqueue_struct *lru_add_drain_wq;
+
+static int __init lru_init(void)
+{
+	lru_add_drain_wq = alloc_workqueue("lru-add-drain", WQ_MEM_RECLAIM, 0);
+
+	if (WARN(!lru_add_drain_wq,
+		"Failed to create workqueue lru_add_drain_wq"))
+		return -ENOMEM;
+
+	return 0;
+}
+early_initcall(lru_init);
+
 void lru_add_drain_all(void)
 {
 	static DEFINE_MUTEX(lock);
@@ -686,7 +703,7 @@ void lru_add_drain_all(void)
 		    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
 		    need_activate_page_drain(cpu)) {
 			INIT_WORK(work, lru_add_drain_per_cpu);
-			schedule_work_on(cpu, work);
+			queue_work_on(cpu, lru_add_drain_wq, work);
 			cpumask_set_cpu(cpu, &has_work);
 		}
 	}
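All of the swap.c hunks switch the per-cpu pagevecs to an add-first, drain-if-full-or-compound policy, so the page just added is always part of the batch being drained. A compact userspace analogue of that ordering; pvec_add(), pvec_drain() and lru_cache_add() are stand-ins, not the kernel functions.

#include <stdio.h>

#define PVEC_SIZE 4

static int pvec[PVEC_SIZE];
static int pvec_count;

static int pvec_add(int page)	/* returns slots left, 0 == now full */
{
	pvec[pvec_count++] = page;
	return PVEC_SIZE - pvec_count;
}

static void pvec_drain(void)
{
	printf("draining %d pages\n", pvec_count);
	pvec_count = 0;
}

static void lru_cache_add(int page, int compound)
{
	/* add first, then drain when full or when the page is compound,
	 * so the current page always travels with the drained batch */
	if (!pvec_add(page) || compound)
		pvec_drain();
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		lru_cache_add(i, 0);
	lru_cache_add(5, 1);	/* a compound page forces an immediate drain */
	return 0;
}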
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 0d457e7db8d6..c99463ac02fb 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -252,7 +252,10 @@ static inline void free_swap_cache(struct page *page)
 void free_page_and_swap_cache(struct page *page)
 {
 	free_swap_cache(page);
-	put_page(page);
+	if (is_huge_zero_page(page))
+		put_huge_zero_page();
+	else
+		put_page(page);
 }
 
 /*
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index cf7ad1a53be0..e11475cdeb7a 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1105,7 +1105,7 @@ EXPORT_SYMBOL_GPL(vm_unmap_aliases);
 */
 void vm_unmap_ram(const void *mem, unsigned int count)
 {
-	unsigned long size = count << PAGE_SHIFT;
+	unsigned long size = (unsigned long)count << PAGE_SHIFT;
 	unsigned long addr = (unsigned long)mem;
 
 	BUG_ON(!addr);
@@ -1140,7 +1140,7 @@ EXPORT_SYMBOL(vm_unmap_ram);
 */
 void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
 {
-	unsigned long size = count << PAGE_SHIFT;
+	unsigned long size = (unsigned long)count << PAGE_SHIFT;
 	unsigned long addr;
 	void *mem;
 
@@ -1574,14 +1574,15 @@
 			unsigned long flags, pgprot_t prot)
 {
 	struct vm_struct *area;
+	unsigned long size;		/* In bytes */
 
 	might_sleep();
 
 	if (count > totalram_pages)
 		return NULL;
 
-	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
-					__builtin_return_address(0));
+	size = (unsigned long)count << PAGE_SHIFT;
+	area = get_vm_area_caller(size, flags, __builtin_return_address(0));
 	if (!area)
 		return NULL;
 
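The vmalloc casts guard against count << PAGE_SHIFT being computed in 32-bit arithmetic: count is an unsigned int, so a large page count wraps before the result is ever widened to unsigned long. This userspace snippet shows the wrap and the fix on an LP64 machine:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned int count = 2u * 1024 * 1024;	/* 2M pages = 8 GiB */

	unsigned long bad  = count << PAGE_SHIFT;		  /* wraps to 0 */
	unsigned long good = (unsigned long)count << PAGE_SHIFT;  /* 8 GiB */

	printf("bad:  %lu\ngood: %lu\n", bad, good);
	return 0;
}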
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 77e42ef388c2..cb2a67bb4158 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1061,6 +1061,8 @@ static void pagetypeinfo_showmixedcount_print(struct seq_file *m,
 			continue;
 
 		page_ext = lookup_page_ext(page);
+		if (unlikely(!page_ext))
+			continue;
 
 		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
 			continue;
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 34917d55d311..8f9e89ca1d31 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -412,7 +412,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
 		/* HEADLESS page stored */
 		bud = HEADLESS;
 	} else {
-		bud = (handle - zhdr->first_num) & BUDDY_MASK;
+		bud = handle_to_buddy(handle);
 
 		switch (bud) {
 		case FIRST:
@@ -572,15 +572,19 @@ next:
 			pool->pages_nr--;
 			spin_unlock(&pool->lock);
 			return 0;
-		} else if (zhdr->first_chunks != 0 &&
-			   zhdr->last_chunks != 0 && zhdr->middle_chunks != 0) {
-			/* Full, add to buddied list */
-			list_add(&zhdr->buddy, &pool->buddied);
-		} else if (!test_bit(PAGE_HEADLESS, &page->private)) {
-			z3fold_compact_page(zhdr);
-			/* add to unbuddied list */
-			freechunks = num_free_chunks(zhdr);
-			list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
+		} else if (!test_bit(PAGE_HEADLESS, &page->private)) {
+			if (zhdr->first_chunks != 0 &&
+			    zhdr->last_chunks != 0 &&
+			    zhdr->middle_chunks != 0) {
+				/* Full, add to buddied list */
+				list_add(&zhdr->buddy, &pool->buddied);
+			} else {
+				z3fold_compact_page(zhdr);
+				/* add to unbuddied list */
+				freechunks = num_free_chunks(zhdr);
+				list_add(&zhdr->buddy,
+					 &pool->unbuddied[freechunks]);
+			}
 		}
 
 		/* add to beginning of LRU */
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index a1e273af6fc8..82a116ba590e 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -290,6 +290,10 @@ static void vlan_sync_address(struct net_device *dev,
 	if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr))
 		return;
 
+	/* vlan continues to inherit address of lower device */
+	if (vlan_dev_inherit_address(vlandev, dev))
+		goto out;
+
 	/* vlan address was different from the old address and is equal to
 	 * the new address */
 	if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
@@ -302,6 +306,7 @@ static void vlan_sync_address(struct net_device *dev,
 	    !ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
 		dev_uc_add(dev, vlandev->dev_addr);
 
+out:
 	ether_addr_copy(vlan->real_dev_addr, dev->dev_addr);
 }
 
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 9d010a09ab98..cc1557978066 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -109,6 +109,8 @@ int vlan_check_real_dev(struct net_device *real_dev,
 void vlan_setup(struct net_device *dev);
 int register_vlan_dev(struct net_device *dev);
 void unregister_vlan_dev(struct net_device *dev, struct list_head *head);
+bool vlan_dev_inherit_address(struct net_device *dev,
+			      struct net_device *real_dev);
 
 static inline u32 vlan_get_ingress_priority(struct net_device *dev,
 					    u16 vlan_tci)
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index e7e62570bdb8..86ae75b77390 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -245,6 +245,17 @@ void vlan_dev_get_realdev_name(const struct net_device *dev, char *result)
 	strncpy(result, vlan_dev_priv(dev)->real_dev->name, 23);
 }
 
+bool vlan_dev_inherit_address(struct net_device *dev,
+			      struct net_device *real_dev)
+{
+	if (dev->addr_assign_type != NET_ADDR_STOLEN)
+		return false;
+
+	ether_addr_copy(dev->dev_addr, real_dev->dev_addr);
+	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+	return true;
+}
+
 static int vlan_dev_open(struct net_device *dev)
 {
 	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
@@ -255,7 +266,8 @@ static int vlan_dev_open(struct net_device *dev)
 	    !(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
 		return -ENETDOWN;
 
-	if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) {
+	if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr) &&
+	    !vlan_dev_inherit_address(dev, real_dev)) {
 		err = dev_uc_add(real_dev, dev->dev_addr);
 		if (err < 0)
 			goto out;
@@ -560,8 +572,10 @@ static int vlan_dev_init(struct net_device *dev)
 	/* ipv6 shared card related stuff */
 	dev->dev_id = real_dev->dev_id;
 
-	if (is_zero_ether_addr(dev->dev_addr))
-		eth_hw_addr_inherit(dev, real_dev);
+	if (is_zero_ether_addr(dev->dev_addr)) {
+		ether_addr_copy(dev->dev_addr, real_dev->dev_addr);
+		dev->addr_assign_type = NET_ADDR_STOLEN;
+	}
 	if (is_zero_ether_addr(dev->broadcast))
 		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
 
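The vlan changes record an inherited MAC as NET_ADDR_STOLEN so that a later address change on the real device is propagated again, instead of the vlan keeping a stale copy that looks user-assigned. A simplified userspace model of that behaviour; struct dev, the enum and inherit_address() are invented for illustration.

#include <stdio.h>
#include <string.h>

enum addr_assign { ADDR_PERM, ADDR_STOLEN, ADDR_SET };

struct dev {
	char mac[18];
	enum addr_assign assign;
};

/* mirrors vlan_dev_inherit_address(): only follow the lower device
 * when the vlan never had an address of its own */
static int inherit_address(struct dev *vlan, const struct dev *real)
{
	if (vlan->assign != ADDR_STOLEN)
		return 0;
	strcpy(vlan->mac, real->mac);
	return 1;
}

int main(void)
{
	struct dev real = { "aa:bb:cc:dd:ee:01", ADDR_PERM };
	struct dev vlan = { "", ADDR_STOLEN };

	strcpy(vlan.mac, real.mac);		/* initial inherit at init */
	strcpy(real.mac, "aa:bb:cc:dd:ee:02");	/* lower device changes MAC */

	if (inherit_address(&vlan, &real))	/* sync follows it again */
		printf("vlan now uses %s\n", vlan.mac);
	return 0;
}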
diff --git a/net/atm/signaling.c b/net/atm/signaling.c
index 4fd6af47383a..adb6e3d21b1e 100644
--- a/net/atm/signaling.c
+++ b/net/atm/signaling.c
@@ -124,7 +124,7 @@ as_indicate_complete:
 		break;
 	case as_addparty:
 	case as_dropparty:
-		sk->sk_err_soft = msg->reply;
+		sk->sk_err_soft = -msg->reply;
 					/* < 0 failure, otherwise ep_ref */
 		clear_bit(ATM_VF_WAITING, &vcc->flags);
 		break;
diff --git a/net/atm/svc.c b/net/atm/svc.c
index 3fa0a9ee98d1..878563a8354d 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -546,7 +546,7 @@ static int svc_addparty(struct socket *sock, struct sockaddr *sockaddr,
 		schedule();
 	}
 	finish_wait(sk_sleep(sk), &wait);
-	error = xchg(&sk->sk_err_soft, 0);
+	error = -xchg(&sk->sk_err_soft, 0);
 out:
 	release_sock(sk);
 	return error;
@@ -573,7 +573,7 @@ static int svc_dropparty(struct socket *sock, int ep_ref)
 		error = -EUNATCH;
 		goto out;
 	}
-	error = xchg(&sk->sk_err_soft, 0);
+	error = -xchg(&sk->sk_err_soft, 0);
 out:
 	release_sock(sk);
 	return error;
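Both atm fixes restore the usual socket convention: the sk_err_soft field holds a positive errno value, and the syscall path negates it on the way out to userspace. The sign discipline, reduced to a runnable sketch (illustrative only):

#include <stdio.h>
#include <errno.h>

int main(void)
{
	int reply = -EIO;		/* failure reported by the daemon */
	int sk_err_soft = -reply;	/* store it positive, as sk_err* expect */

	/* ...later, at the syscall boundary, negate it back */
	int error = -sk_err_soft;

	printf("stored %d, returned %d\n", sk_err_soft, error);	/* 5, -5 */
	return 0;
}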
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index dcea4f4c62b3..c18080ad4085 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -279,6 +279,8 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
 	 * change from under us.
 	 */
 	list_for_each_entry(v, &vg->vlan_list, vlist) {
+		if (!br_vlan_should_use(v))
+			continue;
 		f = __br_fdb_get(br, br->dev->dev_addr, v->vid);
 		if (f && f->is_local && !f->dst)
 			fdb_delete_local(br, NULL, f);
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 0160d7d09a1e..89469592076c 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1276,9 +1276,9 @@ static bool target_should_be_paused(struct ceph_osd_client *osdc,
 				    const struct ceph_osd_request_target *t,
 				    struct ceph_pg_pool_info *pi)
 {
-	bool pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD);
-	bool pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) ||
-		       ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
+	bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
+	bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
+		       ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
 		       __pool_full(pi);
 
 	WARN_ON(pi->id != t->base_oloc.pool);
@@ -1303,8 +1303,7 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
 	bool force_resend = false;
 	bool need_check_tiering = false;
 	bool need_resend = false;
-	bool sort_bitwise = ceph_osdmap_flag(osdc->osdmap,
-					     CEPH_OSDMAP_SORTBITWISE);
+	bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
 	enum calc_target_result ct_res;
 	int ret;
 
@@ -1540,9 +1539,9 @@ static void encode_request(struct ceph_osd_request *req, struct ceph_msg *msg)
 	 */
 	msg->hdr.data_off = cpu_to_le16(req->r_data_offset);
 
-	dout("%s req %p oid %*pE oid_len %d front %zu data %u\n", __func__,
-	     req, req->r_t.target_oid.name_len, req->r_t.target_oid.name,
-	     req->r_t.target_oid.name_len, msg->front.iov_len, data_len);
+	dout("%s req %p oid %s oid_len %d front %zu data %u\n", __func__,
+	     req, req->r_t.target_oid.name, req->r_t.target_oid.name_len,
+	     msg->front.iov_len, data_len);
 }
 
 /*
@@ -1590,9 +1589,9 @@ static void maybe_request_map(struct ceph_osd_client *osdc)
 	verify_osdc_locked(osdc);
 	WARN_ON(!osdc->osdmap->epoch);
 
-	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
-	    ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD) ||
-	    ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR)) {
+	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
+	    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) ||
+	    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
 		dout("%s osdc %p continuous\n", __func__, osdc);
 		continuous = true;
 	} else {
@@ -1629,19 +1628,19 @@ again:
 	}
 
 	if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
-	    ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR)) {
+	    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
 		dout("req %p pausewr\n", req);
 		req->r_t.paused = true;
 		maybe_request_map(osdc);
 	} else if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
-		   ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD)) {
+		   ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
 		dout("req %p pauserd\n", req);
 		req->r_t.paused = true;
 		maybe_request_map(osdc);
 	} else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
 		   !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY |
 				     CEPH_OSD_FLAG_FULL_FORCE)) &&
-		   (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
+		   (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
 		    pool_full(osdc, req->r_t.base_oloc.pool))) {
 		dout("req %p full/pool_full\n", req);
 		pr_warn_ratelimited("FULL or reached pool quota\n");
@@ -2280,7 +2279,7 @@ static void send_linger_ping(struct ceph_osd_linger_request *lreq)
 	struct ceph_osd_request *req = lreq->ping_req;
 	struct ceph_osd_req_op *op = &req->r_ops[0];
 
-	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD)) {
+	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
 		dout("%s PAUSERD\n", __func__);
 		return;
 	}
@@ -2893,6 +2892,9 @@ static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
 			dout("req %p tid %llu cb\n", req, req->r_tid);
 			__complete_request(req);
 		}
+		if (m.flags & CEPH_OSD_FLAG_ONDISK)
+			complete_all(&req->r_safe_completion);
+		ceph_osdc_put_request(req);
 	} else {
 		if (req->r_unsafe_callback) {
 			dout("req %p tid %llu unsafe-cb\n", req, req->r_tid);
@@ -2901,10 +2903,7 @@ static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
 			WARN_ON(1);
 		}
 	}
-	if (m.flags & CEPH_OSD_FLAG_ONDISK)
-		complete_all(&req->r_safe_completion);
 
-	ceph_osdc_put_request(req);
 	return;
 
 fail_request:
@@ -3050,7 +3049,7 @@ static int handle_one_map(struct ceph_osd_client *osdc,
 	bool skipped_map = false;
 	bool was_full;
 
-	was_full = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL);
+	was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
 	set_pool_was_full(osdc);
 
 	if (incremental)
@@ -3088,7 +3087,7 @@ static int handle_one_map(struct ceph_osd_client *osdc,
 		osdc->osdmap = newmap;
 	}
 
-	was_full &= !ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL);
+	was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
 	scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
 		      need_resend, need_resend_linger);
 
@@ -3174,9 +3173,9 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
 	if (ceph_check_fsid(osdc->client, &fsid) < 0)
 		goto bad;
 
-	was_pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD);
-	was_pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) ||
-		      ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
+	was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
+	was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
+		      ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
 		      have_pool_full(osdc);
 
 	/* incremental maps */
@@ -3238,9 +3237,9 @@ done:
 	 * we find out when we are no longer full and stop returning
 	 * ENOSPC.
 	 */
-	pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD);
-	pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) ||
-		  ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
+	pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
+	pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
+		  ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
 		  have_pool_full(osdc);
 	if (was_pauserd || was_pausewr || pauserd || pausewr)
 		maybe_request_map(osdc);
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index cde52e94732f..03062bb763b3 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -1778,8 +1778,8 @@ int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap,
 	raw_pgid->seed = ceph_str_hash(pi->object_hash, oid->name,
 				       oid->name_len);
 
-	dout("%s %*pE -> raw_pgid %llu.%x\n", __func__, oid->name_len,
-	     oid->name, raw_pgid->pool, raw_pgid->seed);
+	dout("%s %s -> raw_pgid %llu.%x\n", __func__, oid->name,
+	     raw_pgid->pool, raw_pgid->seed);
 	return 0;
 }
 EXPORT_SYMBOL(ceph_object_locator_to_pg);
diff --git a/net/compat.c b/net/compat.c
index 5cfd26a0006f..1cd2ec046164 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -309,8 +309,8 @@ void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
 	__scm_destroy(scm);
 }
 
-static int do_set_attach_filter(struct socket *sock, int level, int optname,
-				char __user *optval, unsigned int optlen)
+/* allocate a 64-bit sock_fprog on the user stack for duration of syscall. */
+struct sock_fprog __user *get_compat_bpf_fprog(char __user *optval)
 {
 	struct compat_sock_fprog __user *fprog32 = (struct compat_sock_fprog __user *)optval;
 	struct sock_fprog __user *kfprog = compat_alloc_user_space(sizeof(struct sock_fprog));
@@ -323,6 +323,19 @@ static int do_set_attach_filter(struct socket *sock, int level, int optname,
 	    __get_user(ptr, &fprog32->filter) ||
 	    __put_user(len, &kfprog->len) ||
 	    __put_user(compat_ptr(ptr), &kfprog->filter))
+		return NULL;
+
+	return kfprog;
+}
+EXPORT_SYMBOL_GPL(get_compat_bpf_fprog);
+
+static int do_set_attach_filter(struct socket *sock, int level, int optname,
+				char __user *optval, unsigned int optlen)
+{
+	struct sock_fprog __user *kfprog;
+
+	kfprog = get_compat_bpf_fprog(optval);
+	if (!kfprog)
 		return -EFAULT;
 
 	return sock_setsockopt(sock, level, optname, (char __user *)kfprog,
@@ -354,7 +367,8 @@ static int do_set_sock_timeout(struct socket *sock, int level,
 static int compat_sock_setsockopt(struct socket *sock, int level, int optname,
 				  char __user *optval, unsigned int optlen)
 {
-	if (optname == SO_ATTACH_FILTER)
+	if (optname == SO_ATTACH_FILTER ||
+	    optname == SO_ATTACH_REUSEPORT_CBPF)
 		return do_set_attach_filter(sock, level, optname,
 					    optval, optlen);
 	if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index f96ee8b9478d..be873e4e3125 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -47,6 +47,7 @@ nla_put_failure:
 * @xstats_type: TLV type for backward compatibility xstats TLV
 * @lock: statistics lock
 * @d: dumping handle
+ * @padattr: padding attribute
 *
 * Initializes the dumping handle, grabs the statistic lock and appends
 * an empty TLV header to the socket buffer for use a container for all
@@ -87,6 +88,7 @@ EXPORT_SYMBOL(gnet_stats_start_copy_compat);
 * @type: TLV type for top level statistic TLV
 * @lock: statistics lock
 * @d: dumping handle
+ * @padattr: padding attribute
 *
 * Initializes the dumping handle, grabs the statistic lock and appends
 * an empty TLV header to the socket buffer for use a container for all
diff --git a/net/core/hwbm.c b/net/core/hwbm.c
index 941c28486896..2cab489ae62e 100644
--- a/net/core/hwbm.c
+++ b/net/core/hwbm.c
@@ -55,18 +55,21 @@ int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num, gfp_t gfp)
 	spin_lock_irqsave(&bm_pool->lock, flags);
 	if (bm_pool->buf_num == bm_pool->size) {
 		pr_warn("pool already filled\n");
+		spin_unlock_irqrestore(&bm_pool->lock, flags);
 		return bm_pool->buf_num;
 	}
 
 	if (buf_num + bm_pool->buf_num > bm_pool->size) {
 		pr_warn("cannot allocate %d buffers for pool\n",
 			buf_num);
+		spin_unlock_irqrestore(&bm_pool->lock, flags);
 		return 0;
 	}
 
 	if ((buf_num + bm_pool->buf_num) < bm_pool->buf_num) {
 		pr_warn("Adding %d buffers to the %d current buffers will overflow\n",
 			buf_num, bm_pool->buf_num);
+		spin_unlock_irqrestore(&bm_pool->lock, flags);
 		return 0;
 	}
 
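The hwbm fix addresses the classic early-return-with-lock-held bug: every exit path out of a spin_lock_irqsave() section must unlock, or the next caller deadlocks. A pthread-based rendering of the corrected shape; pool_add() is a stand-in for hwbm_pool_add(), not the real driver code.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int buf_num, size = 8;

static int pool_add(unsigned int n)
{
	pthread_mutex_lock(&lock);
	if (buf_num + n > size) {
		/* returning here without unlocking is exactly the bug
		 * class fixed above; release the lock on every path */
		pthread_mutex_unlock(&lock);
		return 0;
	}
	buf_num += n;
	pthread_mutex_unlock(&lock);
	return n;
}

int main(void)
{
	printf("added %d\n", pool_add(4));
	printf("added %d\n", pool_add(16));	/* rejected, lock released */
	printf("added %d\n", pool_add(4));	/* would hang with the bug */
	return 0;
}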
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 2b3f76fe65f4..7a0b616557ab 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -24,6 +24,7 @@
 #include <linux/jiffies.h>
 #include <linux/pm_runtime.h>
 #include <linux/of.h>
+#include <linux/of_net.h>
 
 #include "net-sysfs.h"
 
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 8604ae245960..8b02df0d354d 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2245,10 +2245,8 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
 	hrtimer_set_expires(&t.timer, spin_until);
 
 	remaining = ktime_to_ns(hrtimer_expires_remaining(&t.timer));
-	if (remaining <= 0) {
-		pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
-		return;
-	}
+	if (remaining <= 0)
+		goto out;
 
 	start_time = ktime_get();
 	if (remaining < 100000) {
@@ -2273,7 +2271,9 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
 	}
 
 	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
+out:
 	pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
+	destroy_hrtimer_on_stack(&t.timer);
 }
 
 static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
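The pktgen rework funnels the early exit through a single out: label so the next_tx bookkeeping and the new destroy_hrtimer_on_stack() call run on every path. A small sketch of that single-exit pattern; struct timer and its helpers are invented for the demo.

#include <stdio.h>

struct timer { int armed; };

static void timer_init(struct timer *t)    { t->armed = 1; }
static void timer_destroy(struct timer *t) { t->armed = 0; puts("destroyed"); }

static void spin_demo(long remaining)
{
	struct timer t;

	timer_init(&t);
	if (remaining <= 0)
		goto out;	/* the early exit still reaches cleanup */

	printf("spinning for %ld ns\n", remaining);
out:
	/* single exit point: bookkeeping and destruction happen on
	 * every path, which is what the pktgen fix restores */
	timer_destroy(&t);
}

int main(void)
{
	spin_demo(100);
	spin_demo(-1);
	return 0;
}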
diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c
index ca207dbf673b..116187b5c267 100644
--- a/net/ieee802154/nl802154.c
+++ b/net/ieee802154/nl802154.c
@@ -1289,8 +1289,8 @@ ieee802154_llsec_parse_dev_addr(struct nlattr *nla,
 			     nl802154_dev_addr_policy))
 		return -EINVAL;
 
-	if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] &&
-	    !attrs[NL802154_DEV_ADDR_ATTR_MODE] &&
+	if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] ||
+	    !attrs[NL802154_DEV_ADDR_ATTR_MODE] ||
 	    !(attrs[NL802154_DEV_ADDR_ATTR_SHORT] ||
 	      attrs[NL802154_DEV_ADDR_ATTR_EXTENDED]))
 		return -EINVAL;
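The nl802154 one-liner is a De Morgan fix: the old && chain only rejected a request when every attribute was missing, while correct validation must reject when any mandatory attribute is missing. The difference, reduced to a compilable toy (valid() is invented):

#include <stdio.h>
#include <stdbool.h>

static bool valid(bool has_pan, bool has_mode, bool has_short, bool has_ext)
{
	/* buggy form, rejected only when *everything* was absent:
	 *	if (!has_pan && !has_mode && !(has_short || has_ext)) */

	/* fixed form: each mandatory attribute is checked on its own */
	if (!has_pan || !has_mode || !(has_short || has_ext))
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", valid(true,  true,  true,  false));	/* 1: complete */
	printf("%d\n", valid(false, true,  true,  false));	/* 0: no PAN id */
	printf("%d\n", valid(true,  true,  false, false));	/* 0: no address */
	return 0;
}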
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 377424ea17a4..d39e9e47a26e 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1681,6 +1681,14 @@ static __net_init int inet_init_net(struct net *net)
 	 */
 	net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1);
 	net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0);
+
+	/* Default values for sysctl-controlled parameters.
+	 * We set them here, in case sysctl is not compiled.
+	 */
+	net->ipv4.sysctl_ip_default_ttl = IPDEFTTL;
+	net->ipv4.sysctl_ip_dynaddr = 0;
+	net->ipv4.sysctl_ip_early_demux = 1;
+
 	return 0;
 }
 
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index bb0419582b8d..1cb67de106fe 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -999,10 +999,6 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
 	if (!net->ipv4.sysctl_local_reserved_ports)
 		goto err_ports;
 
-	net->ipv4.sysctl_ip_default_ttl = IPDEFTTL;
-	net->ipv4.sysctl_ip_dynaddr = 0;
-	net->ipv4.sysctl_ip_early_demux = 1;
-
 	return 0;
 
 err_ports:
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index d56c0559b477..0ff31d97d485 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1618,12 +1618,12 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		}
 	}
 
-	if (rcu_access_pointer(sk->sk_filter)) {
-		if (udp_lib_checksum_complete(skb))
-			goto csum_error;
-		if (sk_filter(sk, skb))
-			goto drop;
-	}
+	if (rcu_access_pointer(sk->sk_filter) &&
+	    udp_lib_checksum_complete(skb))
+		goto csum_error;
+
+	if (sk_filter(sk, skb))
+		goto drop;
 
 	udp_csum_pull_header(skb);
 	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
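This UDP change (mirrored for IPv6 further down) verifies the checksum whenever a socket filter is attached, before the filter can look at the payload, and then runs sk_filter() unconditionally, which is safe because it is a no-op without an attached program. A userspace caricature of the corrected ordering; struct pkt and rcv() are invented.

#include <stdio.h>
#include <stdbool.h>

struct pkt { bool csum_ok; int len; };

static bool have_filter = true;

static bool filter_drops(const struct pkt *p) { return p->len < 8; }

static const char *rcv(struct pkt *p)
{
	/* verify the checksum before a filter ever sees the payload */
	if (have_filter && !p->csum_ok)
		return "csum_error";

	/* run the filter unconditionally, as sk_filter() now is */
	if (filter_drops(p))
		return "drop";

	return "queued";
}

int main(void)
{
	struct pkt good = { true, 100 }, corrupt = { false, 100 };

	printf("%s\n", rcv(&good));	/* queued */
	printf("%s\n", rcv(&corrupt));	/* csum_error */
	return 0;
}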
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index 3f8411328de5..2343e4f2e0bf 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -232,6 +232,15 @@ config IPV6_GRE
 
 	  Saying M here will produce a module called ip6_gre. If unsure, say N.
 
+config IPV6_FOU
+	tristate
+	default NET_FOU && IPV6
+
+config IPV6_FOU_TUNNEL
+	tristate
+	default NET_FOU_IP_TUNNELS && IPV6_FOU
+	select IPV6_TUNNEL
+
 config IPV6_MULTIPLE_TABLES
 	bool "IPv6: Multiple Routing Tables"
 	select FIB_RULES
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index 7ec3129c9ace..6d8ea099213e 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -42,7 +42,7 @@ obj-$(CONFIG_IPV6_VTI) += ip6_vti.o
 obj-$(CONFIG_IPV6_SIT) += sit.o
 obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o
 obj-$(CONFIG_IPV6_GRE) += ip6_gre.o
-obj-$(CONFIG_NET_FOU) += fou6.o
+obj-$(CONFIG_IPV6_FOU) += fou6.o
 
 obj-y += addrconf_core.o exthdrs_core.o ip6_checksum.o ip6_icmp.o
 obj-$(CONFIG_INET) += output_core.o protocol.o $(ipv6-offload)
diff --git a/net/ipv6/fou6.c b/net/ipv6/fou6.c
index c972d0b52579..9ea249b9451e 100644
--- a/net/ipv6/fou6.c
+++ b/net/ipv6/fou6.c
@@ -69,7 +69,7 @@ int gue6_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
 }
 EXPORT_SYMBOL(gue6_build_header);
 
-#ifdef CONFIG_NET_FOU_IP_TUNNELS
+#if IS_ENABLED(CONFIG_IPV6_FOU_TUNNEL)
 
 static const struct ip6_tnl_encap_ops fou_ip6tun_ops = {
 	.encap_hlen = fou_encap_hlen,
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index af503f518278..fdc9de276ab1 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -712,6 +712,7 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
 	fl6->daddr = p->raddr;
 	fl6->flowi6_oif = p->link;
 	fl6->flowlabel = 0;
+	fl6->flowi6_proto = IPPROTO_GRE;
 
 	if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
 		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
@@ -1027,6 +1028,8 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
 
 	dev->hard_header_len = LL_MAX_HEADER + t_hlen;
 	dev->mtu = ETH_DATA_LEN - t_hlen;
+	if (dev->type == ARPHRD_ETHER)
+		dev->mtu -= ETH_HLEN;
 	if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
 		dev->mtu -= 8;
 
@@ -1253,6 +1256,8 @@ static int ip6gre_tap_init(struct net_device *dev)
 	if (ret)
 		return ret;
 
+	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+
 	tunnel = netdev_priv(dev);
 
 	ip6gre_tnl_link_config(tunnel, 1);
@@ -1286,6 +1291,7 @@ static void ip6gre_tap_setup(struct net_device *dev)
 
 	dev->features |= NETIF_F_NETNS_LOCAL;
 	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
 }
 
 static bool ip6gre_netlink_encap_parms(struct nlattr *data[],
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index cbf127ae7c67..635b8d340cdb 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1071,17 +1071,12 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
 					 const struct in6_addr *final_dst)
 {
 	struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
-	int err;
 
 	dst = ip6_sk_dst_check(sk, dst, fl6);
+	if (!dst)
+		dst = ip6_dst_lookup_flow(sk, fl6, final_dst);
 
-	err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
-	if (err)
-		return ERR_PTR(err);
-	if (final_dst)
-		fl6->daddr = *final_dst;
-
-	return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
+	return dst;
 }
 EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
 
diff --git a/net/ipv6/netfilter/nf_dup_ipv6.c b/net/ipv6/netfilter/nf_dup_ipv6.c
index 6989c70ae29f..4a84b5ad9ecb 100644
--- a/net/ipv6/netfilter/nf_dup_ipv6.c
+++ b/net/ipv6/netfilter/nf_dup_ipv6.c
@@ -33,6 +33,7 @@ static bool nf_dup_ipv6_route(struct net *net, struct sk_buff *skb,
 	fl6.daddr = *gw;
 	fl6.flowlabel = (__force __be32)(((iph->flow_lbl[0] & 0xF) << 16) |
 			(iph->flow_lbl[1] << 8) | iph->flow_lbl[2]);
+	fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH;
 	dst = ip6_route_output(net, NULL, &fl6);
 	if (dst->error) {
 		dst_release(dst);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 79e33e02f11a..f36c2d076fce 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1721,7 +1721,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 	destp = ntohs(inet->inet_dport);
 	srcp = ntohs(inet->inet_sport);
 
-	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
+	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
+	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
+	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
 		timer_active = 1;
 		timer_expires = icsk->icsk_timeout;
 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 2da1896af934..f421c9f23c5b 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -653,12 +653,12 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		}
 	}
 
-	if (rcu_access_pointer(sk->sk_filter)) {
-		if (udp_lib_checksum_complete(skb))
-			goto csum_error;
-		if (sk_filter(sk, skb))
-			goto drop;
-	}
+	if (rcu_access_pointer(sk->sk_filter) &&
+	    udp_lib_checksum_complete(skb))
+		goto csum_error;
+
+	if (sk_filter(sk, skb))
+		goto drop;
 
 	udp_csum_pull_header(skb);
 	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 6edfa9980314..1e40dacaa137 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1581,7 +1581,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
 	/* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
 	tunnel->encap = encap;
 	if (encap == L2TP_ENCAPTYPE_UDP) {
-		struct udp_tunnel_sock_cfg udp_cfg;
+		struct udp_tunnel_sock_cfg udp_cfg = { };
 
 		udp_cfg.sk_user_data = tunnel;
 		udp_cfg.encap_type = UDP_ENCAP_L2TPINUDP;
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index c6f5df1bed12..6c54e03fe9c1 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -128,6 +128,7 @@ static inline struct sock *l2tp_ip6_bind_lookup(struct net *net,
128 */ 128 */
129static int l2tp_ip6_recv(struct sk_buff *skb) 129static int l2tp_ip6_recv(struct sk_buff *skb)
130{ 130{
131 struct net *net = dev_net(skb->dev);
131 struct sock *sk; 132 struct sock *sk;
132 u32 session_id; 133 u32 session_id;
133 u32 tunnel_id; 134 u32 tunnel_id;
@@ -154,7 +155,7 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
154 } 155 }
155 156
156 /* Ok, this is a data packet. Lookup the session. */ 157 /* Ok, this is a data packet. Lookup the session. */
157 session = l2tp_session_find(&init_net, NULL, session_id); 158 session = l2tp_session_find(net, NULL, session_id);
158 if (session == NULL) 159 if (session == NULL)
159 goto discard; 160 goto discard;
160 161
@@ -188,14 +189,14 @@ pass_up:
188 goto discard; 189 goto discard;
189 190
190 tunnel_id = ntohl(*(__be32 *) &skb->data[4]); 191 tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
191 tunnel = l2tp_tunnel_find(&init_net, tunnel_id); 192 tunnel = l2tp_tunnel_find(net, tunnel_id);
192 if (tunnel != NULL) 193 if (tunnel != NULL)
193 sk = tunnel->sock; 194 sk = tunnel->sock;
194 else { 195 else {
195 struct ipv6hdr *iph = ipv6_hdr(skb); 196 struct ipv6hdr *iph = ipv6_hdr(skb);
196 197
197 read_lock_bh(&l2tp_ip6_lock); 198 read_lock_bh(&l2tp_ip6_lock);
198 sk = __l2tp_ip6_bind_lookup(&init_net, &iph->daddr, 199 sk = __l2tp_ip6_bind_lookup(net, &iph->daddr,
199 0, tunnel_id); 200 0, tunnel_id);
200 read_unlock_bh(&l2tp_ip6_lock); 201 read_unlock_bh(&l2tp_ip6_lock);
201 } 202 }
@@ -263,6 +264,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
263 struct inet_sock *inet = inet_sk(sk); 264 struct inet_sock *inet = inet_sk(sk);
264 struct ipv6_pinfo *np = inet6_sk(sk); 265 struct ipv6_pinfo *np = inet6_sk(sk);
265 struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *) uaddr; 266 struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *) uaddr;
267 struct net *net = sock_net(sk);
266 __be32 v4addr = 0; 268 __be32 v4addr = 0;
267 int addr_type; 269 int addr_type;
268 int err; 270 int err;
@@ -286,7 +288,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
286 288
287 err = -EADDRINUSE; 289 err = -EADDRINUSE;
288 read_lock_bh(&l2tp_ip6_lock); 290 read_lock_bh(&l2tp_ip6_lock);
289 if (__l2tp_ip6_bind_lookup(&init_net, &addr->l2tp_addr, 291 if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr,
290 sk->sk_bound_dev_if, addr->l2tp_conn_id)) 292 sk->sk_bound_dev_if, addr->l2tp_conn_id))
291 goto out_in_use; 293 goto out_in_use;
292 read_unlock_bh(&l2tp_ip6_lock); 294 read_unlock_bh(&l2tp_ip6_lock);
@@ -456,7 +458,7 @@ static int l2tp_ip6_backlog_recv(struct sock *sk, struct sk_buff *skb)
456 return 0; 458 return 0;
457 459
458drop: 460drop:
459 IP_INC_STATS(&init_net, IPSTATS_MIB_INDISCARDS); 461 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
460 kfree_skb(skb); 462 kfree_skb(skb);
461 return -1; 463 return -1;
462} 464}
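
The l2tp_ip6 changes above consistently derive the namespace from the packet (dev_net(skb->dev)) or the socket (sock_net(sk)) instead of hard-coding init_net, so session, tunnel and bind lookups stay confined to the namespace the traffic belongs to. A rough sketch of a lookup scoped by a namespace handle (illustrative types only):

/* Sketch of namespace-scoped lookup: entries are only visible from
 * the namespace the packet arrived in, never from a global table.
 * All names here are hypothetical.
 */
#include <stddef.h>

struct net { int id; };

struct session {
    struct net     *net;
    unsigned int    session_id;
    struct session *next;
};

static struct session *session_find(struct session *head,
                                    struct net *net, unsigned int id)
{
    for (struct session *s = head; s; s = s->next)
        if (s->net == net && s->session_id == id)  /* scope check */
            return s;
    return NULL;    /* not visible from this namespace */
}
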
diff --git a/net/lapb/lapb_in.c b/net/lapb/lapb_in.c
index 5dba899131b3..182470847fcf 100644
--- a/net/lapb/lapb_in.c
+++ b/net/lapb/lapb_in.c
@@ -444,10 +444,9 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
444 break; 444 break;
445 445
446 case LAPB_FRMR: 446 case LAPB_FRMR:
447 lapb_dbg(1, "(%p) S3 RX FRMR(%d) %02X %02X %02X %02X %02X\n", 447 lapb_dbg(1, "(%p) S3 RX FRMR(%d) %5ph\n",
448 lapb->dev, frame->pf, 448 lapb->dev, frame->pf,
449 skb->data[0], skb->data[1], skb->data[2], 449 skb->data);
450 skb->data[3], skb->data[4]);
451 lapb_establish_data_link(lapb); 450 lapb_establish_data_link(lapb);
452 lapb_dbg(0, "(%p) S3 -> S1\n", lapb->dev); 451 lapb_dbg(0, "(%p) S3 -> S1\n", lapb->dev);
453 lapb_requeue_frames(lapb); 452 lapb_requeue_frames(lapb);
diff --git a/net/lapb/lapb_out.c b/net/lapb/lapb_out.c
index ba4d015bd1a6..482c94d9d958 100644
--- a/net/lapb/lapb_out.c
+++ b/net/lapb/lapb_out.c
@@ -148,9 +148,7 @@ void lapb_transmit_buffer(struct lapb_cb *lapb, struct sk_buff *skb, int type)
148 } 148 }
149 } 149 }
150 150
151 lapb_dbg(2, "(%p) S%d TX %02X %02X %02X\n", 151 lapb_dbg(2, "(%p) S%d TX %3ph\n", lapb->dev, lapb->state, skb->data);
152 lapb->dev, lapb->state,
153 skb->data[0], skb->data[1], skb->data[2]);
154 152
155 if (!lapb_data_transmit(lapb, skb)) 153 if (!lapb_data_transmit(lapb, skb))
156 kfree_skb(skb); 154 kfree_skb(skb);
diff --git a/net/lapb/lapb_subr.c b/net/lapb/lapb_subr.c
index 9d0a426eccbb..3c1914df641f 100644
--- a/net/lapb/lapb_subr.c
+++ b/net/lapb/lapb_subr.c
@@ -113,9 +113,7 @@ int lapb_decode(struct lapb_cb *lapb, struct sk_buff *skb,
113{ 113{
114 frame->type = LAPB_ILLEGAL; 114 frame->type = LAPB_ILLEGAL;
115 115
116 lapb_dbg(2, "(%p) S%d RX %02X %02X %02X\n", 116 lapb_dbg(2, "(%p) S%d RX %3ph\n", lapb->dev, lapb->state, skb->data);
117 lapb->dev, lapb->state,
118 skb->data[0], skb->data[1], skb->data[2]);
119 117
120 /* We always need to look at 2 bytes, sometimes we need 118 /* We always need to look at 2 bytes, sometimes we need
121 * to look at 3 and those cases are handled below. 119 * to look at 3 and those cases are handled below.
@@ -284,10 +282,9 @@ void lapb_transmit_frmr(struct lapb_cb *lapb)
284 dptr++; 282 dptr++;
285 *dptr++ = lapb->frmr_type; 283 *dptr++ = lapb->frmr_type;
286 284
287 lapb_dbg(1, "(%p) S%d TX FRMR %02X %02X %02X %02X %02X\n", 285 lapb_dbg(1, "(%p) S%d TX FRMR %5ph\n",
288 lapb->dev, lapb->state, 286 lapb->dev, lapb->state,
289 skb->data[1], skb->data[2], skb->data[3], 287 &skb->data[1]);
290 skb->data[4], skb->data[5]);
291 } else { 288 } else {
292 dptr = skb_put(skb, 4); 289 dptr = skb_put(skb, 4);
293 *dptr++ = LAPB_FRMR; 290 *dptr++ = LAPB_FRMR;
@@ -299,9 +296,8 @@ void lapb_transmit_frmr(struct lapb_cb *lapb)
299 dptr++; 296 dptr++;
300 *dptr++ = lapb->frmr_type; 297 *dptr++ = lapb->frmr_type;
301 298
302 lapb_dbg(1, "(%p) S%d TX FRMR %02X %02X %02X\n", 299 lapb_dbg(1, "(%p) S%d TX FRMR %3ph\n",
303 lapb->dev, lapb->state, skb->data[1], 300 lapb->dev, lapb->state, &skb->data[1]);
304 skb->data[2], skb->data[3]);
305 } 301 }
306 302
307 lapb_transmit_buffer(lapb, skb, LAPB_RESPONSE); 303 lapb_transmit_buffer(lapb, skb, LAPB_RESPONSE);
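
The lapb hunks above replace hand-unrolled byte dumps with the kernel's %*ph printk extension, where the field width selects how many bytes to dump (%3ph, %5ph) and the buffer is passed once instead of byte by byte. Plain printf() has no such specifier; a rough userspace equivalent of what %5ph produces:

/* Userspace approximation of the kernel's "%*ph" printk extension,
 * which the lapb patches above switch to. printf() itself has no
 * such specifier; this helper is purely illustrative.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static void print_hex_bytes(const uint8_t *buf, size_t n)
{
    for (size_t i = 0; i < n; i++)
        printf("%02x%s", buf[i], i + 1 < n ? " " : "\n");
}

int main(void)
{
    uint8_t data[] = { 0x01, 0x3f, 0x97, 0xaa, 0x10 };

    /* Kernel: lapb_dbg(1, "... %5ph\n", data); */
    print_hex_bytes(data, 5);    /* prints: 01 3f 97 aa 10 */
    return 0;
}
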
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 4c6404e1ad6e..21b1fdf5d01d 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -161,6 +161,10 @@ void mesh_sta_cleanup(struct sta_info *sta)
161 del_timer_sync(&sta->mesh->plink_timer); 161 del_timer_sync(&sta->mesh->plink_timer);
162 } 162 }
163 163
164 /* make sure no readers can access nexthop sta from here on */
165 mesh_path_flush_by_nexthop(sta);
166 synchronize_net();
167
164 if (changed) 168 if (changed)
165 ieee80211_mbss_info_change_notify(sdata, changed); 169 ieee80211_mbss_info_change_notify(sdata, changed);
166} 170}
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index c8b8ccc370eb..78b0ef32dddd 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -280,7 +280,7 @@ struct ieee80211_fast_tx {
280 u8 sa_offs, da_offs, pn_offs; 280 u8 sa_offs, da_offs, pn_offs;
281 u8 band; 281 u8 band;
282 u8 hdr[30 + 2 + IEEE80211_FAST_XMIT_MAX_IV + 282 u8 hdr[30 + 2 + IEEE80211_FAST_XMIT_MAX_IV +
283 sizeof(rfc1042_header)]; 283 sizeof(rfc1042_header)] __aligned(2);
284 284
285 struct rcu_head rcu_head; 285 struct rcu_head rcu_head;
286}; 286};
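
The __aligned(2) added above guarantees the cached fast-xmit header buffer starts on a 16-bit boundary, so the Ethernet addresses inside it can be accessed with naturally aligned 16-bit loads. The standard C11 counterpart of that annotation:

/* C11 sketch of forcing 2-byte alignment on a byte buffer, as the
 * __aligned(2) annotation above does for the cached 802.11 header.
 * Struct layout here is illustrative, not the kernel's.
 */
#include <stdalign.h>
#include <stdio.h>

struct fast_hdr {
    unsigned char band;
    alignas(2) unsigned char hdr[32];  /* starts on a 2-byte boundary */
};

int main(void)
{
    /* Without alignas, hdr could begin at offset 1; with it, 16-bit
     * loads/stores into hdr[0..] are aligned on every architecture. */
    printf("alignof(struct fast_hdr) = %zu\n", alignof(struct fast_hdr));
    return 0;
}
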
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 2cb3c626cd43..096a45103f14 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -762,7 +762,7 @@ static int expire_quiescent_template(struct netns_ipvs *ipvs,
762 * If available, return 1, otherwise invalidate this connection 762 * If available, return 1, otherwise invalidate this connection
763 * template and return 0. 763 * template and return 0.
764 */ 764 */
765int ip_vs_check_template(struct ip_vs_conn *ct) 765int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest)
766{ 766{
767 struct ip_vs_dest *dest = ct->dest; 767 struct ip_vs_dest *dest = ct->dest;
768 struct netns_ipvs *ipvs = ct->ipvs; 768 struct netns_ipvs *ipvs = ct->ipvs;
@@ -772,7 +772,8 @@ int ip_vs_check_template(struct ip_vs_conn *ct)
772 */ 772 */
773 if ((dest == NULL) || 773 if ((dest == NULL) ||
774 !(dest->flags & IP_VS_DEST_F_AVAILABLE) || 774 !(dest->flags & IP_VS_DEST_F_AVAILABLE) ||
775 expire_quiescent_template(ipvs, dest)) { 775 expire_quiescent_template(ipvs, dest) ||
776 (cdest && (dest != cdest))) {
776 IP_VS_DBG_BUF(9, "check_template: dest not available for " 777 IP_VS_DBG_BUF(9, "check_template: dest not available for "
777 "protocol %s s:%s:%d v:%s:%d " 778 "protocol %s s:%s:%d v:%s:%d "
778 "-> d:%s:%d\n", 779 "-> d:%s:%d\n",
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 1207f20d24e4..2c1b498a7a27 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -321,7 +321,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
321 321
322 /* Check if a template already exists */ 322 /* Check if a template already exists */
323 ct = ip_vs_ct_in_get(&param); 323 ct = ip_vs_ct_in_get(&param);
324 if (!ct || !ip_vs_check_template(ct)) { 324 if (!ct || !ip_vs_check_template(ct, NULL)) {
325 struct ip_vs_scheduler *sched; 325 struct ip_vs_scheduler *sched;
326 326
327 /* 327 /*
@@ -1154,7 +1154,8 @@ struct ip_vs_conn *ip_vs_new_conn_out(struct ip_vs_service *svc,
1154 vport, &param) < 0) 1154 vport, &param) < 0)
1155 return NULL; 1155 return NULL;
1156 ct = ip_vs_ct_in_get(&param); 1156 ct = ip_vs_ct_in_get(&param);
1157 if (!ct) { 1157 /* check if template exists and points to the same dest */
1158 if (!ct || !ip_vs_check_template(ct, dest)) {
1158 ct = ip_vs_conn_new(&param, dest->af, daddr, dport, 1159 ct = ip_vs_conn_new(&param, dest->af, daddr, dport,
1159 IP_VS_CONN_F_TEMPLATE, dest, 0); 1160 IP_VS_CONN_F_TEMPLATE, dest, 0);
1160 if (!ct) { 1161 if (!ct) {
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index 883c691ec8d0..19efeba02abb 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -632,6 +632,7 @@ static int __init nf_conntrack_ftp_init(void)
632 if (ret) { 632 if (ret) {
633 pr_err("failed to register helper for pf: %d port: %d\n", 633 pr_err("failed to register helper for pf: %d port: %d\n",
634 ftp[i][j].tuple.src.l3num, ports[i]); 634 ftp[i][j].tuple.src.l3num, ports[i]);
635 ports_c = i;
635 nf_conntrack_ftp_fini(); 636 nf_conntrack_ftp_fini();
636 return ret; 637 return ret;
637 } 638 }
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index f703adb7e5f7..196cb39649e1 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -361,9 +361,10 @@ EXPORT_SYMBOL_GPL(nf_ct_helper_log);
361 361
362int nf_conntrack_helper_register(struct nf_conntrack_helper *me) 362int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
363{ 363{
364 int ret = 0; 364 struct nf_conntrack_tuple_mask mask = { .src.u.all = htons(0xFFFF) };
365 struct nf_conntrack_helper *cur;
366 unsigned int h = helper_hash(&me->tuple); 365 unsigned int h = helper_hash(&me->tuple);
366 struct nf_conntrack_helper *cur;
367 int ret = 0;
367 368
368 BUG_ON(me->expect_policy == NULL); 369 BUG_ON(me->expect_policy == NULL);
369 BUG_ON(me->expect_class_max >= NF_CT_MAX_EXPECT_CLASSES); 370 BUG_ON(me->expect_class_max >= NF_CT_MAX_EXPECT_CLASSES);
@@ -371,9 +372,7 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
371 372
372 mutex_lock(&nf_ct_helper_mutex); 373 mutex_lock(&nf_ct_helper_mutex);
373 hlist_for_each_entry(cur, &nf_ct_helper_hash[h], hnode) { 374 hlist_for_each_entry(cur, &nf_ct_helper_hash[h], hnode) {
374 if (strncmp(cur->name, me->name, NF_CT_HELPER_NAME_LEN) == 0 && 375 if (nf_ct_tuple_src_mask_cmp(&cur->tuple, &me->tuple, &mask)) {
375 cur->tuple.src.l3num == me->tuple.src.l3num &&
376 cur->tuple.dst.protonum == me->tuple.dst.protonum) {
377 ret = -EEXIST; 376 ret = -EEXIST;
378 goto out; 377 goto out;
379 } 378 }
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
index 8b6da2719600..f97ac61d2536 100644
--- a/net/netfilter/nf_conntrack_irc.c
+++ b/net/netfilter/nf_conntrack_irc.c
@@ -271,6 +271,7 @@ static int __init nf_conntrack_irc_init(void)
271 if (ret) { 271 if (ret) {
272 pr_err("failed to register helper for pf: %u port: %u\n", 272 pr_err("failed to register helper for pf: %u port: %u\n",
273 irc[i].tuple.src.l3num, ports[i]); 273 irc[i].tuple.src.l3num, ports[i]);
274 ports_c = i;
274 nf_conntrack_irc_fini(); 275 nf_conntrack_irc_fini();
275 return ret; 276 return ret;
276 } 277 }
diff --git a/net/netfilter/nf_conntrack_sane.c b/net/netfilter/nf_conntrack_sane.c
index 7523a575f6d1..3fcbaab83b3d 100644
--- a/net/netfilter/nf_conntrack_sane.c
+++ b/net/netfilter/nf_conntrack_sane.c
@@ -223,6 +223,7 @@ static int __init nf_conntrack_sane_init(void)
223 if (ret) { 223 if (ret) {
224 pr_err("failed to register helper for pf: %d port: %d\n", 224 pr_err("failed to register helper for pf: %d port: %d\n",
225 sane[i][j].tuple.src.l3num, ports[i]); 225 sane[i][j].tuple.src.l3num, ports[i]);
226 ports_c = i;
226 nf_conntrack_sane_fini(); 227 nf_conntrack_sane_fini();
227 return ret; 228 return ret;
228 } 229 }
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 3e06402739e0..f72ba5587588 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1669,6 +1669,7 @@ static int __init nf_conntrack_sip_init(void)
1669 if (ret) { 1669 if (ret) {
1670 pr_err("failed to register helper for pf: %u port: %u\n", 1670 pr_err("failed to register helper for pf: %u port: %u\n",
1671 sip[i][j].tuple.src.l3num, ports[i]); 1671 sip[i][j].tuple.src.l3num, ports[i]);
1672 ports_c = i;
1672 nf_conntrack_sip_fini(); 1673 nf_conntrack_sip_fini();
1673 return ret; 1674 return ret;
1674 } 1675 }
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index f87e84ebcec3..c026c472ea80 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -487,8 +487,6 @@ static struct ctl_table nf_ct_sysctl_table[] = {
487 { } 487 { }
488}; 488};
489 489
490#define NET_NF_CONNTRACK_MAX 2089
491
492static struct ctl_table nf_ct_netfilter_table[] = { 490static struct ctl_table nf_ct_netfilter_table[] = {
493 { 491 {
494 .procname = "nf_conntrack_max", 492 .procname = "nf_conntrack_max",
diff --git a/net/netfilter/nf_conntrack_tftp.c b/net/netfilter/nf_conntrack_tftp.c
index 36f964066461..2e65b5430fba 100644
--- a/net/netfilter/nf_conntrack_tftp.c
+++ b/net/netfilter/nf_conntrack_tftp.c
@@ -142,6 +142,7 @@ static int __init nf_conntrack_tftp_init(void)
142 if (ret) { 142 if (ret) {
143 pr_err("failed to register helper for pf: %u port: %u\n", 143 pr_err("failed to register helper for pf: %u port: %u\n",
144 tftp[i][j].tuple.src.l3num, ports[i]); 144 tftp[i][j].tuple.src.l3num, ports[i]);
145 ports_c = i;
145 nf_conntrack_tftp_fini(); 146 nf_conntrack_tftp_fini();
146 return ret; 147 return ret;
147 } 148 }
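
The ftp, irc, sane, sip and tftp helper inits above all receive the same one-line fix: before calling the fini routine on a registration failure, ports_c is set to the number of ports already registered, so cleanup unregisters exactly the helpers that exist and no more. A self-contained sketch of that partial-unwind pattern (hypothetical names):

/* Partial-unwind pattern from the conntrack helper fixes above: on
 * failure, record how many items actually registered so the cleanup
 * routine only undoes those. All names are illustrative.
 */
#include <stdio.h>

static unsigned int ports_c;           /* how many ports registered */

static int register_port(int port)
{
    if (port == 4444)
        return -1;                     /* simulate a failure */
    printf("registered %d\n", port);
    return 0;
}

static void unregister_port(int port)
{
    printf("unregistered %d\n", port);
}

static void helpers_fini(const int *ports)
{
    /* Undo only the [0, ports_c) entries that really registered. */
    for (unsigned int i = 0; i < ports_c; i++)
        unregister_port(ports[i]);
}

static int helpers_init(const int *ports, unsigned int n)
{
    for (unsigned int i = 0; i < n; i++) {
        int ret = register_port(ports[i]);
        if (ret) {
            ports_c = i;               /* the fix: record progress */
            helpers_fini(ports);
            return ret;
        }
    }
    ports_c = n;
    return 0;
}

int main(void)
{
    int ports[] = { 21, 2121, 4444, 8021 };
    return helpers_init(ports, 4) ? 1 : 0;
}
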
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 5baa8e24e6ac..b19ad20a705c 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -26,23 +26,21 @@
26 * Once the queue is registered it must reinject all packets it 26 * Once the queue is registered it must reinject all packets it
27 * receives, no matter what. 27 * receives, no matter what.
28 */ 28 */
29static const struct nf_queue_handler __rcu *queue_handler __read_mostly;
30 29
31/* return EBUSY when somebody else is registered, return EEXIST if the 30/* return EBUSY when somebody else is registered, return EEXIST if the
32 * same handler is registered, return 0 in case of success. */ 31 * same handler is registered, return 0 in case of success. */
33void nf_register_queue_handler(const struct nf_queue_handler *qh) 32void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh)
34{ 33{
35 /* should never happen, we only have one queueing backend in kernel */ 34 /* should never happen, we only have one queueing backend in kernel */
36 WARN_ON(rcu_access_pointer(queue_handler)); 35 WARN_ON(rcu_access_pointer(net->nf.queue_handler));
37 rcu_assign_pointer(queue_handler, qh); 36 rcu_assign_pointer(net->nf.queue_handler, qh);
38} 37}
39EXPORT_SYMBOL(nf_register_queue_handler); 38EXPORT_SYMBOL(nf_register_queue_handler);
40 39
41/* The caller must flush their queue before this */ 40/* The caller must flush their queue before this */
42void nf_unregister_queue_handler(void) 41void nf_unregister_queue_handler(struct net *net)
43{ 42{
44 RCU_INIT_POINTER(queue_handler, NULL); 43 RCU_INIT_POINTER(net->nf.queue_handler, NULL);
45 synchronize_rcu();
46} 44}
47EXPORT_SYMBOL(nf_unregister_queue_handler); 45EXPORT_SYMBOL(nf_unregister_queue_handler);
48 46
@@ -103,7 +101,7 @@ void nf_queue_nf_hook_drop(struct net *net, struct nf_hook_ops *ops)
103 const struct nf_queue_handler *qh; 101 const struct nf_queue_handler *qh;
104 102
105 rcu_read_lock(); 103 rcu_read_lock();
106 qh = rcu_dereference(queue_handler); 104 qh = rcu_dereference(net->nf.queue_handler);
107 if (qh) 105 if (qh)
108 qh->nf_hook_drop(net, ops); 106 qh->nf_hook_drop(net, ops);
109 rcu_read_unlock(); 107 rcu_read_unlock();
@@ -122,9 +120,10 @@ int nf_queue(struct sk_buff *skb,
122 struct nf_queue_entry *entry = NULL; 120 struct nf_queue_entry *entry = NULL;
123 const struct nf_afinfo *afinfo; 121 const struct nf_afinfo *afinfo;
124 const struct nf_queue_handler *qh; 122 const struct nf_queue_handler *qh;
123 struct net *net = state->net;
125 124
126 /* QUEUE == DROP if no one is waiting, to be safe. */ 125 /* QUEUE == DROP if no one is waiting, to be safe. */
127 qh = rcu_dereference(queue_handler); 126 qh = rcu_dereference(net->nf.queue_handler);
128 if (!qh) { 127 if (!qh) {
129 status = -ESRCH; 128 status = -ESRCH;
130 goto err; 129 goto err;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 4d292b933b5c..7b7aa871a174 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2647,6 +2647,8 @@ static int nf_tables_getset(struct net *net, struct sock *nlsk,
2647 /* Only accept unspec with dump */ 2647 /* Only accept unspec with dump */
2648 if (nfmsg->nfgen_family == NFPROTO_UNSPEC) 2648 if (nfmsg->nfgen_family == NFPROTO_UNSPEC)
2649 return -EAFNOSUPPORT; 2649 return -EAFNOSUPPORT;
2650 if (!nla[NFTA_SET_TABLE])
2651 return -EINVAL;
2650 2652
2651 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]); 2653 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
2652 if (IS_ERR(set)) 2654 if (IS_ERR(set))
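
The added guard above rejects getset requests that omit the NFTA_SET_TABLE attribute instead of dereferencing a NULL entry in the parsed attribute table. The general shape of the check, with illustrative names:

/* Sketch of the "check the attribute exists before using it" guard
 * added above. A parsed attribute array is indexed by attribute type
 * and holds NULL for anything the request did not carry.
 */
#include <stddef.h>
#include <stdio.h>

#define ATTR_TABLE 1
#define ATTR_MAX   2
#define EINVAL     22

struct attr { const char *data; };

static int handle_getset(const struct attr *nla[ATTR_MAX + 1])
{
    if (!nla[ATTR_TABLE])               /* attribute absent: reject */
        return -EINVAL;
    printf("table=%s\n", nla[ATTR_TABLE]->data);    /* now safe */
    return 0;
}

int main(void)
{
    const struct attr *nla[ATTR_MAX + 1] = { 0 };   /* nothing supplied */
    return handle_getset(nla) == -EINVAL ? 0 : 1;   /* guard fires */
}
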
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index aa93877ab6e2..5d36a0926b4a 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -557,7 +557,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
557 557
558 if (entskb->tstamp.tv64) { 558 if (entskb->tstamp.tv64) {
559 struct nfqnl_msg_packet_timestamp ts; 559 struct nfqnl_msg_packet_timestamp ts;
560 struct timespec64 kts = ktime_to_timespec64(skb->tstamp); 560 struct timespec64 kts = ktime_to_timespec64(entskb->tstamp);
561 561
562 ts.sec = cpu_to_be64(kts.tv_sec); 562 ts.sec = cpu_to_be64(kts.tv_sec);
563 ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC); 563 ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC);
@@ -1482,21 +1482,29 @@ static int __net_init nfnl_queue_net_init(struct net *net)
1482 net->nf.proc_netfilter, &nfqnl_file_ops)) 1482 net->nf.proc_netfilter, &nfqnl_file_ops))
1483 return -ENOMEM; 1483 return -ENOMEM;
1484#endif 1484#endif
1485 nf_register_queue_handler(net, &nfqh);
1485 return 0; 1486 return 0;
1486} 1487}
1487 1488
1488static void __net_exit nfnl_queue_net_exit(struct net *net) 1489static void __net_exit nfnl_queue_net_exit(struct net *net)
1489{ 1490{
1491 nf_unregister_queue_handler(net);
1490#ifdef CONFIG_PROC_FS 1492#ifdef CONFIG_PROC_FS
1491 remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter); 1493 remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter);
1492#endif 1494#endif
1493} 1495}
1494 1496
1497static void nfnl_queue_net_exit_batch(struct list_head *net_exit_list)
1498{
1499 synchronize_rcu();
1500}
1501
1495static struct pernet_operations nfnl_queue_net_ops = { 1502static struct pernet_operations nfnl_queue_net_ops = {
1496 .init = nfnl_queue_net_init, 1503 .init = nfnl_queue_net_init,
1497 .exit = nfnl_queue_net_exit, 1504 .exit = nfnl_queue_net_exit,
1498 .id = &nfnl_queue_net_id, 1505 .exit_batch = nfnl_queue_net_exit_batch,
1499 .size = sizeof(struct nfnl_queue_net), 1506 .id = &nfnl_queue_net_id,
1507 .size = sizeof(struct nfnl_queue_net),
1500}; 1508};
1501 1509
1502static int __init nfnetlink_queue_init(void) 1510static int __init nfnetlink_queue_init(void)
@@ -1517,7 +1525,6 @@ static int __init nfnetlink_queue_init(void)
1517 } 1525 }
1518 1526
1519 register_netdevice_notifier(&nfqnl_dev_notifier); 1527 register_netdevice_notifier(&nfqnl_dev_notifier);
1520 nf_register_queue_handler(&nfqh);
1521 return status; 1528 return status;
1522 1529
1523cleanup_netlink_notifier: 1530cleanup_netlink_notifier:
@@ -1529,7 +1536,6 @@ out:
1529 1536
1530static void __exit nfnetlink_queue_fini(void) 1537static void __exit nfnetlink_queue_fini(void)
1531{ 1538{
1532 nf_unregister_queue_handler();
1533 unregister_netdevice_notifier(&nfqnl_dev_notifier); 1539 unregister_netdevice_notifier(&nfqnl_dev_notifier);
1534 nfnetlink_subsys_unregister(&nfqnl_subsys); 1540 nfnetlink_subsys_unregister(&nfqnl_subsys);
1535 netlink_unregister_notifier(&nfqnl_rtnl_notifier); 1541 netlink_unregister_notifier(&nfqnl_rtnl_notifier);
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index c69c892231d7..2675d580c490 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -612,7 +612,7 @@ int xt_compat_check_entry_offsets(const void *base, const char *elems,
612 return -EINVAL; 612 return -EINVAL;
613 613
614 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 && 614 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
615 target_offset + sizeof(struct compat_xt_standard_target) != next_offset) 615 COMPAT_XT_ALIGN(target_offset + sizeof(struct compat_xt_standard_target)) != next_offset)
616 return -EINVAL; 616 return -EINVAL;
617 617
618 /* compat_xt_entry match has less strict alignment requirements, 618
@@ -694,7 +694,7 @@ int xt_check_entry_offsets(const void *base,
694 return -EINVAL; 694 return -EINVAL;
695 695
696 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 && 696 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
697 target_offset + sizeof(struct xt_standard_target) != next_offset) 697 XT_ALIGN(target_offset + sizeof(struct xt_standard_target)) != next_offset)
698 return -EINVAL; 698 return -EINVAL;
699 699
700 return xt_check_entry_match(elems, base + target_offset, 700 return xt_check_entry_match(elems, base + target_offset,
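
The x_tables checks above previously compared the raw end of a standard target against next_offset, spuriously rejecting blobs in which the target is padded out to the required alignment; aligning the computed end first (XT_ALIGN / COMPAT_XT_ALIGN) matches the layout the blobs actually use. The align-up arithmetic those macros are built on:

/* The round-up-to-a-power-of-two arithmetic behind XT_ALIGN in the
 * check above: align x up to the next multiple of a.
 */
#include <stdio.h>
#include <assert.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
    /* e.g. an entry whose target ends at byte 37, with 8-byte
     * alignment, really occupies up to byte 40 in the blob. */
    assert(ALIGN_UP(37u, 8u) == 40u);
    assert(ALIGN_UP(40u, 8u) == 40u);   /* already aligned: unchanged */
    printf("ok\n");
    return 0;
}
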
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 879185fe183f..9a3eb7a0ebf4 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -137,11 +137,23 @@ static bool is_flow_key_valid(const struct sw_flow_key *key)
137 return !!key->eth.type; 137 return !!key->eth.type;
138} 138}
139 139
140static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr,
141 __be16 ethertype)
142{
143 if (skb->ip_summed == CHECKSUM_COMPLETE) {
144 __be16 diff[] = { ~(hdr->h_proto), ethertype };
145
146 skb->csum = ~csum_partial((char *)diff, sizeof(diff),
147 ~skb->csum);
148 }
149
150 hdr->h_proto = ethertype;
151}
152
140static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key, 153static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
141 const struct ovs_action_push_mpls *mpls) 154 const struct ovs_action_push_mpls *mpls)
142{ 155{
143 __be32 *new_mpls_lse; 156 __be32 *new_mpls_lse;
144 struct ethhdr *hdr;
145 157
146 /* Networking stack do not allow simultaneous Tunnel and MPLS GSO. */ 158 /* Networking stack do not allow simultaneous Tunnel and MPLS GSO. */
147 if (skb->encapsulation) 159 if (skb->encapsulation)
@@ -160,9 +172,7 @@ static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
160 172
161 skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN); 173 skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);
162 174
163 hdr = eth_hdr(skb); 175 update_ethertype(skb, eth_hdr(skb), mpls->mpls_ethertype);
164 hdr->h_proto = mpls->mpls_ethertype;
165
166 if (!skb->inner_protocol) 176 if (!skb->inner_protocol)
167 skb_set_inner_protocol(skb, skb->protocol); 177 skb_set_inner_protocol(skb, skb->protocol);
168 skb->protocol = mpls->mpls_ethertype; 178 skb->protocol = mpls->mpls_ethertype;
@@ -193,7 +203,7 @@ static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
193 * field correctly in the presence of VLAN tags. 203 * field correctly in the presence of VLAN tags.
194 */ 204 */
195 hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN); 205 hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN);
196 hdr->h_proto = ethertype; 206 update_ethertype(skb, hdr, ethertype);
197 if (eth_p_mpls(skb->protocol)) 207 if (eth_p_mpls(skb->protocol))
198 skb->protocol = ethertype; 208 skb->protocol = ethertype;
199 209
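
update_ethertype() above keeps skb->csum valid for CHECKSUM_COMPLETE packets by feeding the pair { ~old, new } through csum_partial() rather than re-summing the whole packet. The same incremental one's-complement update, in the style of RFC 1624, verified against a full recompute:

/* RFC 1624-style incremental checksum update, the arithmetic behind
 * the { ~old, new } diff fed to csum_partial() in update_ethertype().
 */
#include <stdint.h>
#include <stdio.h>
#include <assert.h>

static uint16_t fold(uint32_t sum)
{
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)sum;
}

/* Recompute a 16-bit one's-complement checksum after a single 16-bit
 * field changes from old_w to new_w, without touching the rest of the
 * data: HC' = ~(~HC + ~old + new).
 */
static uint16_t csum_update(uint16_t csum, uint16_t old_w, uint16_t new_w)
{
    uint32_t sum = (uint16_t)~csum;
    sum += (uint16_t)~old_w;
    sum += new_w;
    return (uint16_t)~fold(sum);
}

static uint16_t csum_full(const uint16_t *words, int n)
{
    uint32_t sum = 0;
    for (int i = 0; i < n; i++)
        sum += words[i];
    return (uint16_t)~fold(sum);
}

int main(void)
{
    uint16_t data[4] = { 0x4500, 0x0054, 0x8847, 0x1c46 };
    uint16_t before = csum_full(data, 4);

    data[2] = 0x0800;                        /* rewrite one field */
    assert(csum_update(before, 0x8847, 0x0800) == csum_full(data, 4));
    printf("incremental update matches full recompute\n");
    return 0;
}
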
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 4040eb92d9c9..9bff6ef16fa7 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -93,6 +93,7 @@
93#include <net/inet_common.h> 93#include <net/inet_common.h>
94#endif 94#endif
95#include <linux/bpf.h> 95#include <linux/bpf.h>
96#include <net/compat.h>
96 97
97#include "internal.h" 98#include "internal.h"
98 99
@@ -3940,6 +3941,27 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
3940} 3941}
3941 3942
3942 3943
3944#ifdef CONFIG_COMPAT
3945static int compat_packet_setsockopt(struct socket *sock, int level, int optname,
3946 char __user *optval, unsigned int optlen)
3947{
3948 struct packet_sock *po = pkt_sk(sock->sk);
3949
3950 if (level != SOL_PACKET)
3951 return -ENOPROTOOPT;
3952
3953 if (optname == PACKET_FANOUT_DATA &&
3954 po->fanout && po->fanout->type == PACKET_FANOUT_CBPF) {
3955 optval = (char __user *)get_compat_bpf_fprog(optval);
3956 if (!optval)
3957 return -EFAULT;
3958 optlen = sizeof(struct sock_fprog);
3959 }
3960
3961 return packet_setsockopt(sock, level, optname, optval, optlen);
3962}
3963#endif
3964
3943static int packet_notifier(struct notifier_block *this, 3965static int packet_notifier(struct notifier_block *this,
3944 unsigned long msg, void *ptr) 3966 unsigned long msg, void *ptr)
3945{ 3967{
@@ -4416,6 +4438,9 @@ static const struct proto_ops packet_ops = {
4416 .shutdown = sock_no_shutdown, 4438 .shutdown = sock_no_shutdown,
4417 .setsockopt = packet_setsockopt, 4439 .setsockopt = packet_setsockopt,
4418 .getsockopt = packet_getsockopt, 4440 .getsockopt = packet_getsockopt,
4441#ifdef CONFIG_COMPAT
4442 .compat_setsockopt = compat_packet_setsockopt,
4443#endif
4419 .sendmsg = packet_sendmsg, 4444 .sendmsg = packet_sendmsg,
4420 .recvmsg = packet_recvmsg, 4445 .recvmsg = packet_recvmsg,
4421 .mmap = packet_mmap, 4446 .mmap = packet_mmap,
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 80256b08eac0..387df5f32e49 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -74,6 +74,7 @@ enum {
74 RDS_CONN_CONNECTING, 74 RDS_CONN_CONNECTING,
75 RDS_CONN_DISCONNECTING, 75 RDS_CONN_DISCONNECTING,
76 RDS_CONN_UP, 76 RDS_CONN_UP,
77 RDS_CONN_RESETTING,
77 RDS_CONN_ERROR, 78 RDS_CONN_ERROR,
78}; 79};
79 80
@@ -813,6 +814,7 @@ void rds_connect_worker(struct work_struct *);
813void rds_shutdown_worker(struct work_struct *); 814void rds_shutdown_worker(struct work_struct *);
814void rds_send_worker(struct work_struct *); 815void rds_send_worker(struct work_struct *);
815void rds_recv_worker(struct work_struct *); 816void rds_recv_worker(struct work_struct *);
817void rds_connect_path_complete(struct rds_connection *conn, int curr);
816void rds_connect_complete(struct rds_connection *conn); 818void rds_connect_complete(struct rds_connection *conn);
817 819
818/* transport.c */ 820/* transport.c */
diff --git a/net/rds/recv.c b/net/rds/recv.c
index c0be1ecd11c9..8413f6c99e13 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -561,5 +561,7 @@ void rds_inc_info_copy(struct rds_incoming *inc,
561 minfo.fport = inc->i_hdr.h_dport; 561 minfo.fport = inc->i_hdr.h_dport;
562 } 562 }
563 563
564 minfo.flags = 0;
565
564 rds_info_copy(iter, &minfo, sizeof(minfo)); 566 rds_info_copy(iter, &minfo, sizeof(minfo));
565} 567}
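
minfo above is a stack structure copied out to userspace, and its flags field was never written, so each call leaked uninitialized stack bytes to the reader; the fix assigns flags explicitly. A memset-first variant of the same defense, with an illustrative struct:

/* Sketch of the infoleak fix above: any struct copied to userspace
 * must have every byte initialized, including fields (and padding)
 * the code does not otherwise set. Field names are illustrative.
 */
#include <string.h>
#include <stdint.h>

struct msg_info {
    uint32_t seq;
    uint16_t len;
    uint8_t  flags;   /* was left uninitialized before the fix */
    uint8_t  pad;     /* implicit padding is just as leaky */
};

static void fill_info(struct msg_info *out, uint32_t seq, uint16_t len)
{
    memset(out, 0, sizeof(*out));  /* no stale stack bytes escape */
    out->seq = seq;
    out->len = len;
    /* out->flags and out->pad are now deterministically zero. */
}
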
diff --git a/net/rds/send.c b/net/rds/send.c
index c9cdb358ea88..b1962f8e30f7 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -99,6 +99,7 @@ void rds_send_reset(struct rds_connection *conn)
99 list_splice_init(&conn->c_retrans, &conn->c_send_queue); 99 list_splice_init(&conn->c_retrans, &conn->c_send_queue);
100 spin_unlock_irqrestore(&conn->c_lock, flags); 100 spin_unlock_irqrestore(&conn->c_lock, flags);
101} 101}
102EXPORT_SYMBOL_GPL(rds_send_reset);
102 103
103static int acquire_in_xmit(struct rds_connection *conn) 104static int acquire_in_xmit(struct rds_connection *conn)
104{ 105{
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 86187dad1440..74ee126a6fe6 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -126,9 +126,81 @@ void rds_tcp_restore_callbacks(struct socket *sock,
126} 126}
127 127
128/* 128/*
129 * This is the only path that sets tc->t_sock. Send and receive trust that 129 * rds_tcp_reset_callbacks() switches to the new sock and
130 * it is set. The RDS_CONN_UP bit protects those paths from being 130 * returns the existing tc->t_sock.
131 * called while it isn't set. 131 *
132 * The only functions that set tc->t_sock are rds_tcp_set_callbacks
133 * and rds_tcp_reset_callbacks. Send and receive trust that
134 * it is set. The absence of the RDS_CONN_UP bit protects those paths
135 * from being called while it isn't set.
136 */
137void rds_tcp_reset_callbacks(struct socket *sock,
138 struct rds_connection *conn)
139{
140 struct rds_tcp_connection *tc = conn->c_transport_data;
141 struct socket *osock = tc->t_sock;
142
143 if (!osock)
144 goto newsock;
145
146 /* Need to resolve a duelling SYN between peers.
147 * We have an outstanding SYN to this peer, which may
148 * potentially have transitioned to the RDS_CONN_UP state,
149 * so we must quiesce any send threads before resetting
150 * c_transport_data. We quiesce these threads by setting
151 * c_state to something other than RDS_CONN_UP, and then
152 * waiting for any existing threads in rds_send_xmit to
153 * complete release_in_xmit(). (Subsequent threads entering
154 * rds_send_xmit() will bail on !rds_conn_up().
155 *
156 * However an incoming syn-ack at this point would end up
157 * marking the conn as RDS_CONN_UP, and would again permit
158 * rds_send_xmit() threads through, so ideally we would
159 * synchronize on RDS_CONN_UP after lock_sock(), but cannot
160 * do that: waiting on !RDS_IN_XMIT after lock_sock() may
161 * end up deadlocking with tcp_sendmsg(), and the RDS_IN_XMIT
162 * would not get set. As a result, we set c_state to
163 * RDS_CONN_RESETTING, to ensure that rds_tcp_state_change
164 * cannot mark rds_conn_path_up() in the window before lock_sock()
165 */
166 atomic_set(&conn->c_state, RDS_CONN_RESETTING);
167 wait_event(conn->c_waitq, !test_bit(RDS_IN_XMIT, &conn->c_flags));
168 lock_sock(osock->sk);
169 /* reset receive side state for rds_tcp_data_recv() for osock */
170 if (tc->t_tinc) {
171 rds_inc_put(&tc->t_tinc->ti_inc);
172 tc->t_tinc = NULL;
173 }
174 tc->t_tinc_hdr_rem = sizeof(struct rds_header);
175 tc->t_tinc_data_rem = 0;
176 tc->t_sock = NULL;
177
178 write_lock_bh(&osock->sk->sk_callback_lock);
179
180 osock->sk->sk_user_data = NULL;
181 osock->sk->sk_data_ready = tc->t_orig_data_ready;
182 osock->sk->sk_write_space = tc->t_orig_write_space;
183 osock->sk->sk_state_change = tc->t_orig_state_change;
184 write_unlock_bh(&osock->sk->sk_callback_lock);
185 release_sock(osock->sk);
186 sock_release(osock);
187newsock:
188 rds_send_reset(conn);
189 lock_sock(sock->sk);
190 write_lock_bh(&sock->sk->sk_callback_lock);
191 tc->t_sock = sock;
192 sock->sk->sk_user_data = conn;
193 sock->sk->sk_data_ready = rds_tcp_data_ready;
194 sock->sk->sk_write_space = rds_tcp_write_space;
195 sock->sk->sk_state_change = rds_tcp_state_change;
196
197 write_unlock_bh(&sock->sk->sk_callback_lock);
198 release_sock(sock->sk);
199}
200
201/* Add tc to rds_tcp_tc_list and set tc->t_sock. See comments
202 * above rds_tcp_reset_callbacks for notes about synchronization
203 * with data path
132 */ 204 */
133void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn) 205void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn)
134{ 206{
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index 41c228300525..ec0602b0dc24 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -50,6 +50,7 @@ struct rds_tcp_statistics {
50void rds_tcp_tune(struct socket *sock); 50void rds_tcp_tune(struct socket *sock);
51void rds_tcp_nonagle(struct socket *sock); 51void rds_tcp_nonagle(struct socket *sock);
52void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn); 52void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn);
53void rds_tcp_reset_callbacks(struct socket *sock, struct rds_connection *conn);
53void rds_tcp_restore_callbacks(struct socket *sock, 54void rds_tcp_restore_callbacks(struct socket *sock,
54 struct rds_tcp_connection *tc); 55 struct rds_tcp_connection *tc);
55u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc); 56u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc);
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index fb82e0a0bf89..fba13d0305fb 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -60,7 +60,7 @@ void rds_tcp_state_change(struct sock *sk)
60 case TCP_SYN_RECV: 60 case TCP_SYN_RECV:
61 break; 61 break;
62 case TCP_ESTABLISHED: 62 case TCP_ESTABLISHED:
63 rds_connect_complete(conn); 63 rds_connect_path_complete(conn, RDS_CONN_CONNECTING);
64 break; 64 break;
65 case TCP_CLOSE_WAIT: 65 case TCP_CLOSE_WAIT:
66 case TCP_CLOSE: 66 case TCP_CLOSE:
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 4bf4befe5066..686b1d03a558 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -78,7 +78,6 @@ int rds_tcp_accept_one(struct socket *sock)
78 struct inet_sock *inet; 78 struct inet_sock *inet;
79 struct rds_tcp_connection *rs_tcp = NULL; 79 struct rds_tcp_connection *rs_tcp = NULL;
80 int conn_state; 80 int conn_state;
81 struct sock *nsk;
82 81
83 if (!sock) /* module unload or netns delete in progress */ 82 if (!sock) /* module unload or netns delete in progress */
84 return -ENETUNREACH; 83 return -ENETUNREACH;
@@ -136,26 +135,21 @@ int rds_tcp_accept_one(struct socket *sock)
136 !conn->c_outgoing) { 135 !conn->c_outgoing) {
137 goto rst_nsk; 136 goto rst_nsk;
138 } else { 137 } else {
139 atomic_set(&conn->c_state, RDS_CONN_CONNECTING); 138 rds_tcp_reset_callbacks(new_sock, conn);
140 wait_event(conn->c_waitq,
141 !test_bit(RDS_IN_XMIT, &conn->c_flags));
142 rds_tcp_restore_callbacks(rs_tcp->t_sock, rs_tcp);
143 conn->c_outgoing = 0; 139 conn->c_outgoing = 0;
140 /* rds_connect_path_complete() marks RDS_CONN_UP */
141 rds_connect_path_complete(conn, RDS_CONN_DISCONNECTING);
144 } 142 }
143 } else {
144 rds_tcp_set_callbacks(new_sock, conn);
145 rds_connect_path_complete(conn, RDS_CONN_CONNECTING);
145 } 146 }
146 rds_tcp_set_callbacks(new_sock, conn);
147 rds_connect_complete(conn); /* marks RDS_CONN_UP */
148 new_sock = NULL; 147 new_sock = NULL;
149 ret = 0; 148 ret = 0;
150 goto out; 149 goto out;
151rst_nsk: 150rst_nsk:
152 /* reset the newly returned accept sock and bail */ 151 /* reset the newly returned accept sock and bail */
153 nsk = new_sock->sk; 152 kernel_sock_shutdown(new_sock, SHUT_RDWR);
154 rds_tcp_stats_inc(s_tcp_listen_closed_stale);
155 nsk->sk_user_data = NULL;
156 nsk->sk_prot->disconnect(nsk, 0);
157 tcp_done(nsk);
158 new_sock = NULL;
159 ret = 0; 153 ret = 0;
160out: 154out:
161 if (rs_tcp) 155 if (rs_tcp)
diff --git a/net/rds/threads.c b/net/rds/threads.c
index 454aa6d23327..4a323045719b 100644
--- a/net/rds/threads.c
+++ b/net/rds/threads.c
@@ -71,9 +71,9 @@
71struct workqueue_struct *rds_wq; 71struct workqueue_struct *rds_wq;
72EXPORT_SYMBOL_GPL(rds_wq); 72EXPORT_SYMBOL_GPL(rds_wq);
73 73
74void rds_connect_complete(struct rds_connection *conn) 74void rds_connect_path_complete(struct rds_connection *conn, int curr)
75{ 75{
76 if (!rds_conn_transition(conn, RDS_CONN_CONNECTING, RDS_CONN_UP)) { 76 if (!rds_conn_transition(conn, curr, RDS_CONN_UP)) {
77 printk(KERN_WARNING "%s: Cannot transition to state UP, " 77 printk(KERN_WARNING "%s: Cannot transition to state UP, "
78 "current state is %d\n", 78 "current state is %d\n",
79 __func__, 79 __func__,
@@ -90,6 +90,12 @@ void rds_connect_complete(struct rds_connection *conn)
90 queue_delayed_work(rds_wq, &conn->c_send_w, 0); 90 queue_delayed_work(rds_wq, &conn->c_send_w, 0);
91 queue_delayed_work(rds_wq, &conn->c_recv_w, 0); 91 queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
92} 92}
93EXPORT_SYMBOL_GPL(rds_connect_path_complete);
94
95void rds_connect_complete(struct rds_connection *conn)
96{
97 rds_connect_path_complete(conn, RDS_CONN_CONNECTING);
98}
93EXPORT_SYMBOL_GPL(rds_connect_complete); 99EXPORT_SYMBOL_GPL(rds_connect_complete);
94 100
95/* 101/*
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 6b726a046a7d..bab56ed649ba 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -1162,9 +1162,7 @@ static int rxkad_init(void)
1162 /* pin the cipher we need so that the crypto layer doesn't invoke 1162 /* pin the cipher we need so that the crypto layer doesn't invoke
1163 * keventd to go get it */ 1163 * keventd to go get it */
1164 rxkad_ci = crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC); 1164 rxkad_ci = crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
1165 if (IS_ERR(rxkad_ci)) 1165 return PTR_ERR_OR_ZERO(rxkad_ci);
1166 return PTR_ERR(rxkad_ci);
1167 return 0;
1168} 1166}
1169 1167
1170/* 1168/*
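
The rxkad cleanup above collapses the IS_ERR/PTR_ERR/return-0 tail into a single PTR_ERR_OR_ZERO(). A userspace re-creation of the kernel's error-pointer convention (simplified from include/linux/err.h) showing what the helper evaluates to:

/* Userspace re-creation of the kernel error-pointer helpers used in
 * the rxkad cleanup above, simplified from include/linux/err.h.
 */
#include <stdio.h>

#define MAX_ERRNO 4095

static inline int IS_ERR(const void *ptr)
{
    /* Error pointers live in the last page of the address space. */
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static inline long PTR_ERR(const void *ptr)
{
    return (long)ptr;
}

static inline long PTR_ERR_OR_ZERO(const void *ptr)
{
    return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
}

static void *alloc_cipher(int fail)
{
    static int object;                  /* stands in for a real cipher */
    return fail ? (void *)(long)-12     /* -ENOMEM encoded in the ptr */
                : (void *)&object;
}

int main(void)
{
    /* The one-liner the patch replaces three lines with: */
    printf("ok:   %ld\n", PTR_ERR_OR_ZERO(alloc_cipher(0)));   /* 0   */
    printf("fail: %ld\n", PTR_ERR_OR_ZERO(alloc_cipher(1)));   /* -12 */
    return 0;
}
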
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 330f14e302e8..c557789765dc 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -38,7 +38,7 @@ struct tcf_police {
38 bool peak_present; 38 bool peak_present;
39}; 39};
40#define to_police(pc) \ 40#define to_police(pc) \
41 container_of(pc, struct tcf_police, common) 41 container_of(pc->priv, struct tcf_police, common)
42 42
43#define POL_TAB_MASK 15 43#define POL_TAB_MASK 15
44 44
@@ -119,14 +119,12 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
119 struct nlattr *est, struct tc_action *a, 119 struct nlattr *est, struct tc_action *a,
120 int ovr, int bind) 120 int ovr, int bind)
121{ 121{
122 unsigned int h;
123 int ret = 0, err; 122 int ret = 0, err;
124 struct nlattr *tb[TCA_POLICE_MAX + 1]; 123 struct nlattr *tb[TCA_POLICE_MAX + 1];
125 struct tc_police *parm; 124 struct tc_police *parm;
126 struct tcf_police *police; 125 struct tcf_police *police;
127 struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL; 126 struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
128 struct tc_action_net *tn = net_generic(net, police_net_id); 127 struct tc_action_net *tn = net_generic(net, police_net_id);
129 struct tcf_hashinfo *hinfo = tn->hinfo;
130 int size; 128 int size;
131 129
132 if (nla == NULL) 130 if (nla == NULL)
@@ -145,7 +143,7 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
145 143
146 if (parm->index) { 144 if (parm->index) {
147 if (tcf_hash_search(tn, a, parm->index)) { 145 if (tcf_hash_search(tn, a, parm->index)) {
148 police = to_police(a->priv); 146 police = to_police(a);
149 if (bind) { 147 if (bind) {
150 police->tcf_bindcnt += 1; 148 police->tcf_bindcnt += 1;
151 police->tcf_refcnt += 1; 149 police->tcf_refcnt += 1;
@@ -156,16 +154,15 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
156 /* not replacing */ 154 /* not replacing */
157 return -EEXIST; 155 return -EEXIST;
158 } 156 }
157 } else {
158 ret = tcf_hash_create(tn, parm->index, NULL, a,
159 sizeof(*police), bind, false);
160 if (ret)
161 return ret;
162 ret = ACT_P_CREATED;
159 } 163 }
160 164
161 police = kzalloc(sizeof(*police), GFP_KERNEL); 165 police = to_police(a);
162 if (police == NULL)
163 return -ENOMEM;
164 ret = ACT_P_CREATED;
165 police->tcf_refcnt = 1;
166 spin_lock_init(&police->tcf_lock);
167 if (bind)
168 police->tcf_bindcnt = 1;
169override: 166override:
170 if (parm->rate.rate) { 167 if (parm->rate.rate) {
171 err = -ENOMEM; 168 err = -ENOMEM;
@@ -237,14 +234,8 @@ override:
237 return ret; 234 return ret;
238 235
239 police->tcfp_t_c = ktime_get_ns(); 236 police->tcfp_t_c = ktime_get_ns();
240 police->tcf_index = parm->index ? parm->index : 237 tcf_hash_insert(tn, a);
241 tcf_hash_new_index(tn);
242 h = tcf_hash(police->tcf_index, POL_TAB_MASK);
243 spin_lock_bh(&hinfo->lock);
244 hlist_add_head(&police->tcf_head, &hinfo->htab[h]);
245 spin_unlock_bh(&hinfo->lock);
246 238
247 a->priv = police;
248 return ret; 239 return ret;
249 240
250failure_unlock: 241failure_unlock:
@@ -253,7 +244,7 @@ failure:
253 qdisc_put_rtab(P_tab); 244 qdisc_put_rtab(P_tab);
254 qdisc_put_rtab(R_tab); 245 qdisc_put_rtab(R_tab);
255 if (ret == ACT_P_CREATED) 246 if (ret == ACT_P_CREATED)
256 kfree(police); 247 tcf_hash_cleanup(a, est);
257 return err; 248 return err;
258} 249}
259 250
@@ -268,6 +259,7 @@ static int tcf_act_police(struct sk_buff *skb, const struct tc_action *a,
268 spin_lock(&police->tcf_lock); 259 spin_lock(&police->tcf_lock);
269 260
270 bstats_update(&police->tcf_bstats, skb); 261 bstats_update(&police->tcf_bstats, skb);
262 tcf_lastuse_update(&police->tcf_tm);
271 263
272 if (police->tcfp_ewma_rate && 264 if (police->tcfp_ewma_rate &&
273 police->tcf_rate_est.bps >= police->tcfp_ewma_rate) { 265 police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
@@ -327,6 +319,7 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
327 .refcnt = police->tcf_refcnt - ref, 319 .refcnt = police->tcf_refcnt - ref,
328 .bindcnt = police->tcf_bindcnt - bind, 320 .bindcnt = police->tcf_bindcnt - bind,
329 }; 321 };
322 struct tcf_t t;
330 323
331 if (police->rate_present) 324 if (police->rate_present)
332 psched_ratecfg_getrate(&opt.rate, &police->rate); 325 psched_ratecfg_getrate(&opt.rate, &police->rate);
@@ -340,6 +333,13 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
340 if (police->tcfp_ewma_rate && 333 if (police->tcfp_ewma_rate &&
341 nla_put_u32(skb, TCA_POLICE_AVRATE, police->tcfp_ewma_rate)) 334 nla_put_u32(skb, TCA_POLICE_AVRATE, police->tcfp_ewma_rate))
342 goto nla_put_failure; 335 goto nla_put_failure;
336
337 t.install = jiffies_to_clock_t(jiffies - police->tcf_tm.install);
338 t.lastuse = jiffies_to_clock_t(jiffies - police->tcf_tm.lastuse);
339 t.expires = jiffies_to_clock_t(police->tcf_tm.expires);
340 if (nla_put_64bit(skb, TCA_POLICE_TM, sizeof(t), &t, TCA_POLICE_PAD))
341 goto nla_put_failure;
342
343 return skb->len; 343 return skb->len;
344 344
345nla_put_failure: 345nla_put_failure:
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 730aacafc22d..b3b7978f4182 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -171,7 +171,7 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, unsigned long cookie)
171 struct tc_cls_flower_offload offload = {0}; 171 struct tc_cls_flower_offload offload = {0};
172 struct tc_to_netdev tc; 172 struct tc_to_netdev tc;
173 173
174 if (!tc_should_offload(dev, 0)) 174 if (!tc_should_offload(dev, tp, 0))
175 return; 175 return;
176 176
177 offload.command = TC_CLSFLOWER_DESTROY; 177 offload.command = TC_CLSFLOWER_DESTROY;
@@ -194,7 +194,7 @@ static void fl_hw_replace_filter(struct tcf_proto *tp,
194 struct tc_cls_flower_offload offload = {0}; 194 struct tc_cls_flower_offload offload = {0};
195 struct tc_to_netdev tc; 195 struct tc_to_netdev tc;
196 196
197 if (!tc_should_offload(dev, flags)) 197 if (!tc_should_offload(dev, tp, flags))
198 return; 198 return;
199 199
200 offload.command = TC_CLSFLOWER_REPLACE; 200 offload.command = TC_CLSFLOWER_REPLACE;
@@ -216,7 +216,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
216 struct tc_cls_flower_offload offload = {0}; 216 struct tc_cls_flower_offload offload = {0};
217 struct tc_to_netdev tc; 217 struct tc_to_netdev tc;
218 218
219 if (!tc_should_offload(dev, 0)) 219 if (!tc_should_offload(dev, tp, 0))
220 return; 220 return;
221 221
222 offload.command = TC_CLSFLOWER_STATS; 222 offload.command = TC_CLSFLOWER_STATS;
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 079b43b3c5d2..ffe593efe930 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -440,7 +440,7 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
440 offload.type = TC_SETUP_CLSU32; 440 offload.type = TC_SETUP_CLSU32;
441 offload.cls_u32 = &u32_offload; 441 offload.cls_u32 = &u32_offload;
442 442
443 if (tc_should_offload(dev, 0)) { 443 if (tc_should_offload(dev, tp, 0)) {
444 offload.cls_u32->command = TC_CLSU32_DELETE_KNODE; 444 offload.cls_u32->command = TC_CLSU32_DELETE_KNODE;
445 offload.cls_u32->knode.handle = handle; 445 offload.cls_u32->knode.handle = handle;
446 dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, 446 dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
@@ -457,20 +457,21 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp,
457 struct tc_to_netdev offload; 457 struct tc_to_netdev offload;
458 int err; 458 int err;
459 459
460 if (!tc_should_offload(dev, tp, flags))
461 return tc_skip_sw(flags) ? -EINVAL : 0;
462
460 offload.type = TC_SETUP_CLSU32; 463 offload.type = TC_SETUP_CLSU32;
461 offload.cls_u32 = &u32_offload; 464 offload.cls_u32 = &u32_offload;
462 465
463 if (tc_should_offload(dev, flags)) { 466 offload.cls_u32->command = TC_CLSU32_NEW_HNODE;
464 offload.cls_u32->command = TC_CLSU32_NEW_HNODE; 467 offload.cls_u32->hnode.divisor = h->divisor;
465 offload.cls_u32->hnode.divisor = h->divisor; 468 offload.cls_u32->hnode.handle = h->handle;
466 offload.cls_u32->hnode.handle = h->handle; 469 offload.cls_u32->hnode.prio = h->prio;
467 offload.cls_u32->hnode.prio = h->prio;
468 470
469 err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, 471 err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
470 tp->protocol, &offload); 472 tp->protocol, &offload);
471 if (tc_skip_sw(flags)) 473 if (tc_skip_sw(flags))
472 return err; 474 return err;
473 }
474 475
475 return 0; 476 return 0;
476} 477}
@@ -484,7 +485,7 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
484 offload.type = TC_SETUP_CLSU32; 485 offload.type = TC_SETUP_CLSU32;
485 offload.cls_u32 = &u32_offload; 486 offload.cls_u32 = &u32_offload;
486 487
487 if (tc_should_offload(dev, 0)) { 488 if (tc_should_offload(dev, tp, 0)) {
488 offload.cls_u32->command = TC_CLSU32_DELETE_HNODE; 489 offload.cls_u32->command = TC_CLSU32_DELETE_HNODE;
489 offload.cls_u32->hnode.divisor = h->divisor; 490 offload.cls_u32->hnode.divisor = h->divisor;
490 offload.cls_u32->hnode.handle = h->handle; 491 offload.cls_u32->hnode.handle = h->handle;
@@ -507,27 +508,28 @@ static int u32_replace_hw_knode(struct tcf_proto *tp,
507 offload.type = TC_SETUP_CLSU32; 508 offload.type = TC_SETUP_CLSU32;
508 offload.cls_u32 = &u32_offload; 509 offload.cls_u32 = &u32_offload;
509 510
510 if (tc_should_offload(dev, flags)) { 511 if (!tc_should_offload(dev, tp, flags))
511 offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE; 512 return tc_skip_sw(flags) ? -EINVAL : 0;
512 offload.cls_u32->knode.handle = n->handle; 513
513 offload.cls_u32->knode.fshift = n->fshift; 514 offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE;
515 offload.cls_u32->knode.handle = n->handle;
516 offload.cls_u32->knode.fshift = n->fshift;
514#ifdef CONFIG_CLS_U32_MARK 517#ifdef CONFIG_CLS_U32_MARK
515 offload.cls_u32->knode.val = n->val; 518 offload.cls_u32->knode.val = n->val;
516 offload.cls_u32->knode.mask = n->mask; 519 offload.cls_u32->knode.mask = n->mask;
517#else 520#else
518 offload.cls_u32->knode.val = 0; 521 offload.cls_u32->knode.val = 0;
519 offload.cls_u32->knode.mask = 0; 522 offload.cls_u32->knode.mask = 0;
520#endif 523#endif
521 offload.cls_u32->knode.sel = &n->sel; 524 offload.cls_u32->knode.sel = &n->sel;
522 offload.cls_u32->knode.exts = &n->exts; 525 offload.cls_u32->knode.exts = &n->exts;
523 if (n->ht_down) 526 if (n->ht_down)
524 offload.cls_u32->knode.link_handle = n->ht_down->handle; 527 offload.cls_u32->knode.link_handle = n->ht_down->handle;
525 528
526 err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, 529 err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
527 tp->protocol, &offload); 530 tp->protocol, &offload);
528 if (tc_skip_sw(flags)) 531 if (tc_skip_sw(flags))
529 return err; 532 return err;
530 }
531 533
532 return 0; 534 return 0;
533} 535}
@@ -863,7 +865,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
863 if (tb[TCA_U32_FLAGS]) { 865 if (tb[TCA_U32_FLAGS]) {
864 flags = nla_get_u32(tb[TCA_U32_FLAGS]); 866 flags = nla_get_u32(tb[TCA_U32_FLAGS]);
865 if (!tc_flags_valid(flags)) 867 if (!tc_flags_valid(flags))
866 return err; 868 return -EINVAL;
867 } 869 }
868 870
869 n = (struct tc_u_knode *)*arg; 871 n = (struct tc_u_knode *)*arg;
@@ -921,11 +923,17 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
921 ht->divisor = divisor; 923 ht->divisor = divisor;
922 ht->handle = handle; 924 ht->handle = handle;
923 ht->prio = tp->prio; 925 ht->prio = tp->prio;
926
927 err = u32_replace_hw_hnode(tp, ht, flags);
928 if (err) {
929 kfree(ht);
930 return err;
931 }
932
924 RCU_INIT_POINTER(ht->next, tp_c->hlist); 933 RCU_INIT_POINTER(ht->next, tp_c->hlist);
925 rcu_assign_pointer(tp_c->hlist, ht); 934 rcu_assign_pointer(tp_c->hlist, ht);
926 *arg = (unsigned long)ht; 935 *arg = (unsigned long)ht;
927 936
928 u32_replace_hw_hnode(tp, ht, flags);
929 return 0; 937 return 0;
930 } 938 }
931 939
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 64f71a2155f3..ddf047df5361 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -607,6 +607,10 @@ void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool thr
607 if (throttle) 607 if (throttle)
608 qdisc_throttled(wd->qdisc); 608 qdisc_throttled(wd->qdisc);
609 609
610 if (wd->last_expires == expires)
611 return;
612
613 wd->last_expires = expires;
610 hrtimer_start(&wd->timer, 614 hrtimer_start(&wd->timer,
611 ns_to_ktime(expires), 615 ns_to_ktime(expires),
612 HRTIMER_MODE_ABS_PINNED); 616 HRTIMER_MODE_ABS_PINNED);
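
qdisc_watchdog_schedule_ns() above now caches the programmed deadline and returns early when asked to re-arm the hrtimer to the value it already carries, saving a pointless cancel/reprogram cycle. A small sketch of that memoization (timer_start() stands in for hrtimer_start(); the armed flag is an addition for this sketch):

/* Sketch of the "don't re-arm the timer to the same deadline"
 * optimization above. timer_start() stands in for hrtimer_start();
 * names and the armed flag are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

struct watchdog {
    uint64_t last_expires;   /* deadline currently programmed */
    int      armed;
};

static void timer_start(struct watchdog *wd, uint64_t expires)
{
    wd->armed = 1;
    printf("programming hardware timer for %llu\n",
           (unsigned long long)expires);
}

static void watchdog_schedule(struct watchdog *wd, uint64_t expires)
{
    if (wd->armed && wd->last_expires == expires)
        return;                       /* already set to this deadline */
    wd->last_expires = expires;
    timer_start(wd, expires);
}

int main(void)
{
    struct watchdog wd = { 0 };
    watchdog_schedule(&wd, 1000);     /* programs the timer */
    watchdog_schedule(&wd, 1000);     /* no-op: same deadline */
    watchdog_schedule(&wd, 2000);     /* reprograms */
    return 0;
}
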
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index a63e879e8975..bf8af2c43c2c 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -375,6 +375,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
375 cl->deficit = cl->quantum; 375 cl->deficit = cl->quantum;
376 } 376 }
377 377
378 qdisc_qstats_backlog_inc(sch, skb);
378 sch->q.qlen++; 379 sch->q.qlen++;
379 return err; 380 return err;
380} 381}
@@ -407,6 +408,7 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch)
407 408
408 bstats_update(&cl->bstats, skb); 409 bstats_update(&cl->bstats, skb);
409 qdisc_bstats_update(sch, skb); 410 qdisc_bstats_update(sch, skb);
411 qdisc_qstats_backlog_dec(sch, skb);
410 sch->q.qlen--; 412 sch->q.qlen--;
411 return skb; 413 return skb;
412 } 414 }
@@ -428,6 +430,7 @@ static unsigned int drr_drop(struct Qdisc *sch)
428 if (cl->qdisc->ops->drop) { 430 if (cl->qdisc->ops->drop) {
429 len = cl->qdisc->ops->drop(cl->qdisc); 431 len = cl->qdisc->ops->drop(cl->qdisc);
430 if (len > 0) { 432 if (len > 0) {
433 sch->qstats.backlog -= len;
431 sch->q.qlen--; 434 sch->q.qlen--;
432 if (cl->qdisc->q.qlen == 0) 435 if (cl->qdisc->q.qlen == 0)
433 list_del(&cl->alist); 436 list_del(&cl->alist);
@@ -463,6 +466,7 @@ static void drr_reset_qdisc(struct Qdisc *sch)
463 qdisc_reset(cl->qdisc); 466 qdisc_reset(cl->qdisc);
464 } 467 }
465 } 468 }
469 sch->qstats.backlog = 0;
466 sch->q.qlen = 0; 470 sch->q.qlen = 0;
467} 471}
468 472
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 6883a8971562..da250b2e06ae 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -199,6 +199,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
199 unsigned int idx, prev_backlog, prev_qlen; 199 unsigned int idx, prev_backlog, prev_qlen;
200 struct fq_codel_flow *flow; 200 struct fq_codel_flow *flow;
201 int uninitialized_var(ret); 201 int uninitialized_var(ret);
202 unsigned int pkt_len;
202 bool memory_limited; 203 bool memory_limited;
203 204
204 idx = fq_codel_classify(skb, sch, &ret); 205 idx = fq_codel_classify(skb, sch, &ret);
@@ -230,6 +231,8 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
230 prev_backlog = sch->qstats.backlog; 231 prev_backlog = sch->qstats.backlog;
231 prev_qlen = sch->q.qlen; 232 prev_qlen = sch->q.qlen;
232 233
234 /* save this packet length as it might be dropped by fq_codel_drop() */
235 pkt_len = qdisc_pkt_len(skb);
233 /* fq_codel_drop() is quite expensive, as it performs a linear search 236 /* fq_codel_drop() is quite expensive, as it performs a linear search
234 * in q->backlogs[] to find a fat flow. 237 * in q->backlogs[] to find a fat flow.
235 * So instead of dropping a single packet, drop half of its backlog 238 * So instead of dropping a single packet, drop half of its backlog
@@ -237,14 +240,23 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
237 */ 240 */
238 ret = fq_codel_drop(sch, q->drop_batch_size); 241 ret = fq_codel_drop(sch, q->drop_batch_size);
239 242
240 q->drop_overlimit += prev_qlen - sch->q.qlen; 243 prev_qlen -= sch->q.qlen;
244 prev_backlog -= sch->qstats.backlog;
245 q->drop_overlimit += prev_qlen;
241 if (memory_limited) 246 if (memory_limited)
242 q->drop_overmemory += prev_qlen - sch->q.qlen; 247 q->drop_overmemory += prev_qlen;
243 /* As we dropped packet(s), better let upper stack know this */
244 qdisc_tree_reduce_backlog(sch, prev_qlen - sch->q.qlen,
245 prev_backlog - sch->qstats.backlog);
246 248
247 return ret == idx ? NET_XMIT_CN : NET_XMIT_SUCCESS; 249 /* As we dropped packet(s), better let upper stack know this.
250 * If we dropped a packet for this flow, return NET_XMIT_CN,
251 * but in this case, our parents won't increase their backlogs.
252 */
253 if (ret == idx) {
254 qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
255 prev_backlog - pkt_len);
256 return NET_XMIT_CN;
257 }
258 qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
259 return NET_XMIT_SUCCESS;
248} 260}
249 261
250/* This is the specific function called from codel_dequeue() 262/* This is the specific function called from codel_dequeue()
@@ -649,7 +661,7 @@ static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
649 qs.backlog = q->backlogs[idx]; 661 qs.backlog = q->backlogs[idx];
650 qs.drops = flow->dropped; 662 qs.drops = flow->dropped;
651 } 663 }
652 if (gnet_stats_copy_queue(d, NULL, &qs, 0) < 0) 664 if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
653 return -1; 665 return -1;
654 if (idx < q->flows_cnt) 666 if (idx < q->flows_cnt)
655 return gnet_stats_copy_app(d, &xstats, sizeof(xstats)); 667 return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 269dd71b3828..f9e0e9c03d0a 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -49,6 +49,7 @@ static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
49{ 49{
50 q->gso_skb = skb; 50 q->gso_skb = skb;
51 q->qstats.requeues++; 51 q->qstats.requeues++;
52 qdisc_qstats_backlog_inc(q, skb);
52 q->q.qlen++; /* it's still part of the queue */ 53 q->q.qlen++; /* it's still part of the queue */
53 __netif_schedule(q); 54 __netif_schedule(q);
54 55
@@ -92,6 +93,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
92 txq = skb_get_tx_queue(txq->dev, skb); 93 txq = skb_get_tx_queue(txq->dev, skb);
93 if (!netif_xmit_frozen_or_stopped(txq)) { 94 if (!netif_xmit_frozen_or_stopped(txq)) {
94 q->gso_skb = NULL; 95 q->gso_skb = NULL;
96 qdisc_qstats_backlog_dec(q, skb);
95 q->q.qlen--; 97 q->q.qlen--;
96 } else 98 } else
97 skb = NULL; 99 skb = NULL;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index d783d7cc3348..1ac9f9f03fe3 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1529,6 +1529,7 @@ hfsc_reset_qdisc(struct Qdisc *sch)
1529 q->eligible = RB_ROOT; 1529 q->eligible = RB_ROOT;
1530 INIT_LIST_HEAD(&q->droplist); 1530 INIT_LIST_HEAD(&q->droplist);
1531 qdisc_watchdog_cancel(&q->watchdog); 1531 qdisc_watchdog_cancel(&q->watchdog);
1532 sch->qstats.backlog = 0;
1532 sch->q.qlen = 0; 1533 sch->q.qlen = 0;
1533} 1534}
1534 1535
@@ -1559,14 +1560,6 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
1559 struct hfsc_sched *q = qdisc_priv(sch); 1560 struct hfsc_sched *q = qdisc_priv(sch);
1560 unsigned char *b = skb_tail_pointer(skb); 1561 unsigned char *b = skb_tail_pointer(skb);
1561 struct tc_hfsc_qopt qopt; 1562 struct tc_hfsc_qopt qopt;
1562 struct hfsc_class *cl;
1563 unsigned int i;
1564
1565 sch->qstats.backlog = 0;
1566 for (i = 0; i < q->clhash.hashsize; i++) {
1567 hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
1568 sch->qstats.backlog += cl->qdisc->qstats.backlog;
1569 }
1570 1563
1571 qopt.defcls = q->defcls; 1564 qopt.defcls = q->defcls;
1572 if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt)) 1565 if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
@@ -1604,6 +1597,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
1604 if (cl->qdisc->q.qlen == 1) 1597 if (cl->qdisc->q.qlen == 1)
1605 set_active(cl, qdisc_pkt_len(skb)); 1598 set_active(cl, qdisc_pkt_len(skb));
1606 1599
1600 qdisc_qstats_backlog_inc(sch, skb);
1607 sch->q.qlen++; 1601 sch->q.qlen++;
1608 1602
1609 return NET_XMIT_SUCCESS; 1603 return NET_XMIT_SUCCESS;
@@ -1672,6 +1666,7 @@ hfsc_dequeue(struct Qdisc *sch)
1672 1666
1673 qdisc_unthrottled(sch); 1667 qdisc_unthrottled(sch);
1674 qdisc_bstats_update(sch, skb); 1668 qdisc_bstats_update(sch, skb);
1669 qdisc_qstats_backlog_dec(sch, skb);
1675 sch->q.qlen--; 1670 sch->q.qlen--;
1676 1671
1677 return skb; 1672 return skb;
@@ -1695,6 +1690,7 @@ hfsc_drop(struct Qdisc *sch)
1695 } 1690 }
1696 cl->qstats.drops++; 1691 cl->qstats.drops++;
1697 qdisc_qstats_drop(sch); 1692 qdisc_qstats_drop(sch);
1693 sch->qstats.backlog -= len;
1698 sch->q.qlen--; 1694 sch->q.qlen--;
1699 return len; 1695 return len;
1700 } 1696 }
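
The hfsc conversion is the same idea applied in reverse: instead of recomputing sch->qstats.backlog at dump time by walking every class (the loop deleted from hfsc_dump_qdisc()), enqueue, dequeue, drop, and reset now maintain the counter incrementally, so it is correct at any instant rather than only at dump time. An illustrative consistency check (not in the patch) of the invariant the incremental updates preserve, reusing the deleted walk:

  static void hfsc_check_backlog(struct Qdisc *sch, struct hfsc_sched *q)
  {
          struct hfsc_class *cl;
          unsigned int i, sum = 0;

          for (i = 0; i < q->clhash.hashsize; i++)
                  hlist_for_each_entry(cl, &q->clhash.hash[i],
                                       cl_common.hnode)
                          sum += cl->qdisc->qstats.backlog;

          WARN_ON_ONCE(sum != sch->qstats.backlog);
  }
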
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index f6bf5818ed4d..d4b4218af6b1 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -928,17 +928,10 @@ ok:
928 } 928 }
929 } 929 }
930 qdisc_qstats_overlimit(sch); 930 qdisc_qstats_overlimit(sch);
931 if (likely(next_event > q->now)) { 931 if (likely(next_event > q->now))
932 if (!test_bit(__QDISC_STATE_DEACTIVATED, 932 qdisc_watchdog_schedule_ns(&q->watchdog, next_event, true);
933 &qdisc_root_sleeping(q->watchdog.qdisc)->state)) { 933 else
934 ktime_t time = ns_to_ktime(next_event);
935 qdisc_throttled(q->watchdog.qdisc);
936 hrtimer_start(&q->watchdog.timer, time,
937 HRTIMER_MODE_ABS_PINNED);
938 }
939 } else {
940 schedule_work(&q->work); 934 schedule_work(&q->work);
941 }
942fin: 935fin:
943 return skb; 936 return skb;
944} 937}
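
The htb hunk folds an open-coded "am I still active? mark throttled, arm a pinned absolute hrtimer" sequence into qdisc_watchdog_schedule_ns(), which grew a throttle flag for this caller. A sketch of the helper, mirroring the block it replaces:

  void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires,
                                  bool throttle)
  {
          if (test_bit(__QDISC_STATE_DEACTIVATED,
                       &qdisc_root_sleeping(wd->qdisc)->state))
                  return;  /* qdisc is being torn down; don't rearm */

          if (throttle)
                  qdisc_throttled(wd->qdisc);

          hrtimer_start(&wd->timer, ns_to_ktime(expires),
                        HRTIMER_MODE_ABS_PINNED);
  }
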
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index 10adbc617905..8fe6999b642a 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -27,6 +27,11 @@ static unsigned long ingress_get(struct Qdisc *sch, u32 classid)
27 return TC_H_MIN(classid) + 1; 27 return TC_H_MIN(classid) + 1;
28} 28}
29 29
30static bool ingress_cl_offload(u32 classid)
31{
32 return true;
33}
34
30static unsigned long ingress_bind_filter(struct Qdisc *sch, 35static unsigned long ingress_bind_filter(struct Qdisc *sch,
31 unsigned long parent, u32 classid) 36 unsigned long parent, u32 classid)
32{ 37{
@@ -86,6 +91,7 @@ static const struct Qdisc_class_ops ingress_class_ops = {
86 .put = ingress_put, 91 .put = ingress_put,
87 .walk = ingress_walk, 92 .walk = ingress_walk,
88 .tcf_chain = ingress_find_tcf, 93 .tcf_chain = ingress_find_tcf,
94 .tcf_cl_offload = ingress_cl_offload,
89 .bind_tcf = ingress_bind_filter, 95 .bind_tcf = ingress_bind_filter,
90 .unbind_tcf = ingress_put, 96 .unbind_tcf = ingress_put,
91}; 97};
@@ -110,6 +116,11 @@ static unsigned long clsact_get(struct Qdisc *sch, u32 classid)
110 } 116 }
111} 117}
112 118
119static bool clsact_cl_offload(u32 classid)
120{
121 return TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS);
122}
123
113static unsigned long clsact_bind_filter(struct Qdisc *sch, 124static unsigned long clsact_bind_filter(struct Qdisc *sch,
114 unsigned long parent, u32 classid) 125 unsigned long parent, u32 classid)
115{ 126{
@@ -158,6 +169,7 @@ static const struct Qdisc_class_ops clsact_class_ops = {
158 .put = ingress_put, 169 .put = ingress_put,
159 .walk = ingress_walk, 170 .walk = ingress_walk,
160 .tcf_chain = clsact_find_tcf, 171 .tcf_chain = clsact_find_tcf,
172 .tcf_cl_offload = clsact_cl_offload,
161 .bind_tcf = clsact_bind_filter, 173 .bind_tcf = clsact_bind_filter,
162 .unbind_tcf = ingress_put, 174 .unbind_tcf = ingress_put,
163}; 175};
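
ingress and clsact gain a tcf_cl_offload() class op so the classifier hardware-offload path can ask whether a filter bound to a given classid may be offloaded: plain ingress always answers yes, while clsact permits only its ingress half (TC_H_MIN_INGRESS), since the egress hook has no hardware equivalent. A hedged sketch of an offload gate consulting the new op; the real call site lives in the classifier offload helpers and may differ in detail:

  static bool example_can_offload(const struct net_device *dev,
                                  const struct tcf_proto *tp)
  {
          const struct Qdisc_class_ops *cops = tp->q->ops->cl_ops;

          if (!(dev->features & NETIF_F_HW_TC))
                  return false;
          if (cops && cops->tcf_cl_offload)
                  return cops->tcf_cl_offload(tp->classid);
          return true;
  }
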
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index fee1b15506b2..4b0a82191bc4 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -85,6 +85,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
85 85
86 ret = qdisc_enqueue(skb, qdisc); 86 ret = qdisc_enqueue(skb, qdisc);
87 if (ret == NET_XMIT_SUCCESS) { 87 if (ret == NET_XMIT_SUCCESS) {
88 qdisc_qstats_backlog_inc(sch, skb);
88 sch->q.qlen++; 89 sch->q.qlen++;
89 return NET_XMIT_SUCCESS; 90 return NET_XMIT_SUCCESS;
90 } 91 }
@@ -117,6 +118,7 @@ static struct sk_buff *prio_dequeue(struct Qdisc *sch)
117 struct sk_buff *skb = qdisc_dequeue_peeked(qdisc); 118 struct sk_buff *skb = qdisc_dequeue_peeked(qdisc);
118 if (skb) { 119 if (skb) {
119 qdisc_bstats_update(sch, skb); 120 qdisc_bstats_update(sch, skb);
121 qdisc_qstats_backlog_dec(sch, skb);
120 sch->q.qlen--; 122 sch->q.qlen--;
121 return skb; 123 return skb;
122 } 124 }
@@ -135,6 +137,7 @@ static unsigned int prio_drop(struct Qdisc *sch)
135 for (prio = q->bands-1; prio >= 0; prio--) { 137 for (prio = q->bands-1; prio >= 0; prio--) {
136 qdisc = q->queues[prio]; 138 qdisc = q->queues[prio];
137 if (qdisc->ops->drop && (len = qdisc->ops->drop(qdisc)) != 0) { 139 if (qdisc->ops->drop && (len = qdisc->ops->drop(qdisc)) != 0) {
140 sch->qstats.backlog -= len;
138 sch->q.qlen--; 141 sch->q.qlen--;
139 return len; 142 return len;
140 } 143 }
@@ -151,6 +154,7 @@ prio_reset(struct Qdisc *sch)
151 154
152 for (prio = 0; prio < q->bands; prio++) 155 for (prio = 0; prio < q->bands; prio++)
153 qdisc_reset(q->queues[prio]); 156 qdisc_reset(q->queues[prio]);
157 sch->qstats.backlog = 0;
154 sch->q.qlen = 0; 158 sch->q.qlen = 0;
155} 159}
156 160
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 8d2d8d953432..f18857febdad 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -1235,8 +1235,10 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
1235 cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid); 1235 cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid);
1236 err = qfq_change_agg(sch, cl, cl->agg->class_weight, 1236 err = qfq_change_agg(sch, cl, cl->agg->class_weight,
1237 qdisc_pkt_len(skb)); 1237 qdisc_pkt_len(skb));
1238 if (err) 1238 if (err) {
1239 return err; 1239 cl->qstats.drops++;
1240 return qdisc_drop(skb, sch);
1241 }
1240 } 1242 }
1241 1243
1242 err = qdisc_enqueue(skb, cl->qdisc); 1244 err = qdisc_enqueue(skb, cl->qdisc);
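
The qfq change closes a leak-and-miscount: when resizing the aggregate fails, the old code returned the raw error while the skb was neither queued nor freed. Now the packet is dropped, charged to the class's drop counter, and the caller gets the return code it expects. qdisc_drop() (include/net/sch_generic.h) bundles all three steps:

  static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
  {
          kfree_skb(skb);
          qdisc_qstats_drop(sch);

          return NET_XMIT_DROP;
  }
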
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 8c0508c0e287..91578bdd378c 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -97,6 +97,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
97 97
98 ret = qdisc_enqueue(skb, child); 98 ret = qdisc_enqueue(skb, child);
99 if (likely(ret == NET_XMIT_SUCCESS)) { 99 if (likely(ret == NET_XMIT_SUCCESS)) {
100 qdisc_qstats_backlog_inc(sch, skb);
100 sch->q.qlen++; 101 sch->q.qlen++;
101 } else if (net_xmit_drop_count(ret)) { 102 } else if (net_xmit_drop_count(ret)) {
102 q->stats.pdrop++; 103 q->stats.pdrop++;
@@ -118,6 +119,7 @@ static struct sk_buff *red_dequeue(struct Qdisc *sch)
118 skb = child->dequeue(child); 119 skb = child->dequeue(child);
119 if (skb) { 120 if (skb) {
120 qdisc_bstats_update(sch, skb); 121 qdisc_bstats_update(sch, skb);
122 qdisc_qstats_backlog_dec(sch, skb);
121 sch->q.qlen--; 123 sch->q.qlen--;
122 } else { 124 } else {
123 if (!red_is_idling(&q->vars)) 125 if (!red_is_idling(&q->vars))
@@ -143,6 +145,7 @@ static unsigned int red_drop(struct Qdisc *sch)
143 if (child->ops->drop && (len = child->ops->drop(child)) > 0) { 145 if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
144 q->stats.other++; 146 q->stats.other++;
145 qdisc_qstats_drop(sch); 147 qdisc_qstats_drop(sch);
148 sch->qstats.backlog -= len;
146 sch->q.qlen--; 149 sch->q.qlen--;
147 return len; 150 return len;
148 } 151 }
@@ -158,6 +161,7 @@ static void red_reset(struct Qdisc *sch)
158 struct red_sched_data *q = qdisc_priv(sch); 161 struct red_sched_data *q = qdisc_priv(sch);
159 162
160 qdisc_reset(q->qdisc); 163 qdisc_reset(q->qdisc);
164 sch->qstats.backlog = 0;
161 sch->q.qlen = 0; 165 sch->q.qlen = 0;
162 red_restart(&q->vars); 166 red_restart(&q->vars);
163} 167}
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 83b90b584fae..3161e491990b 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -207,6 +207,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
207 return ret; 207 return ret;
208 } 208 }
209 209
210 qdisc_qstats_backlog_inc(sch, skb);
210 sch->q.qlen++; 211 sch->q.qlen++;
211 return NET_XMIT_SUCCESS; 212 return NET_XMIT_SUCCESS;
212} 213}
@@ -217,6 +218,7 @@ static unsigned int tbf_drop(struct Qdisc *sch)
217 unsigned int len = 0; 218 unsigned int len = 0;
218 219
219 if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) { 220 if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
221 sch->qstats.backlog -= len;
220 sch->q.qlen--; 222 sch->q.qlen--;
221 qdisc_qstats_drop(sch); 223 qdisc_qstats_drop(sch);
222 } 224 }
@@ -263,6 +265,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
263 q->t_c = now; 265 q->t_c = now;
264 q->tokens = toks; 266 q->tokens = toks;
265 q->ptokens = ptoks; 267 q->ptokens = ptoks;
268 qdisc_qstats_backlog_dec(sch, skb);
266 sch->q.qlen--; 269 sch->q.qlen--;
267 qdisc_unthrottled(sch); 270 qdisc_unthrottled(sch);
268 qdisc_bstats_update(sch, skb); 271 qdisc_bstats_update(sch, skb);
@@ -294,6 +297,7 @@ static void tbf_reset(struct Qdisc *sch)
294 struct tbf_sched_data *q = qdisc_priv(sch); 297 struct tbf_sched_data *q = qdisc_priv(sch);
295 298
296 qdisc_reset(q->qdisc); 299 qdisc_reset(q->qdisc);
300 sch->qstats.backlog = 0;
297 sch->q.qlen = 0; 301 sch->q.qlen = 0;
298 q->t_c = ktime_get_ns(); 302 q->t_c = ktime_get_ns();
299 q->tokens = q->buffer; 303 q->tokens = q->buffer;
diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c
index 8e3e769dc9ea..1ce724b87618 100644
--- a/net/sctp/sctp_diag.c
+++ b/net/sctp/sctp_diag.c
@@ -356,6 +356,9 @@ static int sctp_ep_dump(struct sctp_endpoint *ep, void *p)
356 if (cb->args[4] < cb->args[1]) 356 if (cb->args[4] < cb->args[1])
357 goto next; 357 goto next;
358 358
359 if ((r->idiag_states & ~TCPF_LISTEN) && !list_empty(&ep->asocs))
360 goto next;
361
359 if (r->sdiag_family != AF_UNSPEC && 362 if (r->sdiag_family != AF_UNSPEC &&
360 sk->sk_family != r->sdiag_family) 363 sk->sk_family != r->sdiag_family)
361 goto next; 364 goto next;
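
One plausible reading of the new sctp_diag predicate: when the requester's state mask asks for more than listening sockets and the endpoint already carries associations, the bare endpoint row is skipped, because each association is dumped as its own entry and the endpoint row would duplicate it. Annotated (the comments are illustrative; the line itself is the patch's):

  /* r->idiag_states & ~TCPF_LISTEN: caller wants non-listening states
   * !list_empty(&ep->asocs):        this ep's asocs are dumped on
   *                                 their own, so skip the ep row
   */
  if ((r->idiag_states & ~TCPF_LISTEN) && !list_empty(&ep->asocs))
          goto next;
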
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 777d0324594a..67154b848aa9 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4220,6 +4220,7 @@ int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
4220 info->sctpi_s_disable_fragments = sp->disable_fragments; 4220 info->sctpi_s_disable_fragments = sp->disable_fragments;
4221 info->sctpi_s_v4mapped = sp->v4mapped; 4221 info->sctpi_s_v4mapped = sp->v4mapped;
4222 info->sctpi_s_frag_interleave = sp->frag_interleave; 4222 info->sctpi_s_frag_interleave = sp->frag_interleave;
4223 info->sctpi_s_type = sp->type;
4223 4224
4224 return 0; 4225 return 0;
4225 } 4226 }
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 06b4df9faaa1..2808d550d273 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -446,16 +446,27 @@ out_no_rpciod:
446 return ERR_PTR(err); 446 return ERR_PTR(err);
447} 447}
448 448
449struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args, 449static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
450 struct rpc_xprt *xprt) 450 struct rpc_xprt *xprt)
451{ 451{
452 struct rpc_clnt *clnt = NULL; 452 struct rpc_clnt *clnt = NULL;
453 struct rpc_xprt_switch *xps; 453 struct rpc_xprt_switch *xps;
454 454
455 xps = xprt_switch_alloc(xprt, GFP_KERNEL); 455 if (args->bc_xprt && args->bc_xprt->xpt_bc_xps) {
456 if (xps == NULL) 456 WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP);
457 return ERR_PTR(-ENOMEM); 457 xps = args->bc_xprt->xpt_bc_xps;
458 458 xprt_switch_get(xps);
459 } else {
460 xps = xprt_switch_alloc(xprt, GFP_KERNEL);
461 if (xps == NULL) {
462 xprt_put(xprt);
463 return ERR_PTR(-ENOMEM);
464 }
465 if (xprt->bc_xprt) {
466 xprt_switch_get(xps);
467 xprt->bc_xprt->xpt_bc_xps = xps;
468 }
469 }
459 clnt = rpc_new_client(args, xps, xprt, NULL); 470 clnt = rpc_new_client(args, xps, xprt, NULL);
460 if (IS_ERR(clnt)) 471 if (IS_ERR(clnt))
461 return clnt; 472 return clnt;
@@ -483,7 +494,6 @@ struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
483 494
484 return clnt; 495 return clnt;
485} 496}
486EXPORT_SYMBOL_GPL(rpc_create_xprt);
487 497
488/** 498/**
489 * rpc_create - create an RPC client and transport with one call 499 * rpc_create - create an RPC client and transport with one call
@@ -509,6 +519,15 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
509 }; 519 };
510 char servername[48]; 520 char servername[48];
511 521
522 if (args->bc_xprt) {
523 WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP);
524 xprt = args->bc_xprt->xpt_bc_xprt;
525 if (xprt) {
526 xprt_get(xprt);
527 return rpc_create_xprt(args, xprt);
528 }
529 }
530
512 if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS) 531 if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS)
513 xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS; 532 xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS;
514 if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT) 533 if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT)
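
The sunrpc changes make the NFSv4.1 back channel reuse the rpc_xprt_switch already attached to the server-side transport instead of allocating a second one, plug the xprt leak on the xprt_switch_alloc() failure path, and unexport the now internal-only rpc_create_xprt(). The reference pairing spans three files: the get taken in rpc_create_xprt() is dropped in svc_xprt_free() below, and xs_setup_bc_tcp()'s error path clears the stashed pointer. Condensed ownership sketch (not a literal function in the tree):

  /* Whoever stores xps in xpt_bc_xps owns a reference; the server
   * transport's destructor drops it.
   */
  static void bc_xps_attach(struct svc_xprt *bc, struct rpc_xprt_switch *xps)
  {
          xprt_switch_get(xps);
          bc->xpt_bc_xps = xps;
  }

  static void bc_xps_release(struct svc_xprt *bc)  /* as in svc_xprt_free() */
  {
          if (bc->xpt_bc_xps)
                  xprt_switch_put(bc->xpt_bc_xps);
  }
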
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index f5572e31d518..4f01f63102ee 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -136,6 +136,8 @@ static void svc_xprt_free(struct kref *kref)
136 /* See comment on corresponding get in xs_setup_bc_tcp(): */ 136 /* See comment on corresponding get in xs_setup_bc_tcp(): */
137 if (xprt->xpt_bc_xprt) 137 if (xprt->xpt_bc_xprt)
138 xprt_put(xprt->xpt_bc_xprt); 138 xprt_put(xprt->xpt_bc_xprt);
139 if (xprt->xpt_bc_xps)
140 xprt_switch_put(xprt->xpt_bc_xps);
139 xprt->xpt_ops->xpo_free(xprt); 141 xprt->xpt_ops->xpo_free(xprt);
140 module_put(owner); 142 module_put(owner);
141} 143}
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 2d3e0c42361e..7e2b2fa189c3 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -3057,6 +3057,7 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
3057 return xprt; 3057 return xprt;
3058 3058
3059 args->bc_xprt->xpt_bc_xprt = NULL; 3059 args->bc_xprt->xpt_bc_xprt = NULL;
3060 args->bc_xprt->xpt_bc_xps = NULL;
3060 xprt_put(xprt); 3061 xprt_put(xprt);
3061 ret = ERR_PTR(-EINVAL); 3062 ret = ERR_PTR(-EINVAL);
3062out_err: 3063out_err:
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 4dfc5c14f8c3..3ad9fab1985f 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -346,9 +346,15 @@ static int tipc_nl_compat_bearer_dump(struct tipc_nl_compat_msg *msg,
346 struct nlattr **attrs) 346 struct nlattr **attrs)
347{ 347{
348 struct nlattr *bearer[TIPC_NLA_BEARER_MAX + 1]; 348 struct nlattr *bearer[TIPC_NLA_BEARER_MAX + 1];
349 int err;
350
351 if (!attrs[TIPC_NLA_BEARER])
352 return -EINVAL;
349 353
350 nla_parse_nested(bearer, TIPC_NLA_BEARER_MAX, attrs[TIPC_NLA_BEARER], 354 err = nla_parse_nested(bearer, TIPC_NLA_BEARER_MAX,
351 NULL); 355 attrs[TIPC_NLA_BEARER], NULL);
356 if (err)
357 return err;
352 358
353 return tipc_add_tlv(msg->rep, TIPC_TLV_BEARER_NAME, 359 return tipc_add_tlv(msg->rep, TIPC_TLV_BEARER_NAME,
354 nla_data(bearer[TIPC_NLA_BEARER_NAME]), 360 nla_data(bearer[TIPC_NLA_BEARER_NAME]),
@@ -460,14 +466,31 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
460 struct nlattr *link[TIPC_NLA_LINK_MAX + 1]; 466 struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
461 struct nlattr *prop[TIPC_NLA_PROP_MAX + 1]; 467 struct nlattr *prop[TIPC_NLA_PROP_MAX + 1];
462 struct nlattr *stats[TIPC_NLA_STATS_MAX + 1]; 468 struct nlattr *stats[TIPC_NLA_STATS_MAX + 1];
469 int err;
463 470
464 nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], NULL); 471 if (!attrs[TIPC_NLA_LINK])
472 return -EINVAL;
465 473
466 nla_parse_nested(prop, TIPC_NLA_PROP_MAX, link[TIPC_NLA_LINK_PROP], 474 err = nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK],
467 NULL); 475 NULL);
476 if (err)
477 return err;
478
479 if (!link[TIPC_NLA_LINK_PROP])
480 return -EINVAL;
468 481
469 nla_parse_nested(stats, TIPC_NLA_STATS_MAX, link[TIPC_NLA_LINK_STATS], 482 err = nla_parse_nested(prop, TIPC_NLA_PROP_MAX,
470 NULL); 483 link[TIPC_NLA_LINK_PROP], NULL);
484 if (err)
485 return err;
486
487 if (!link[TIPC_NLA_LINK_STATS])
488 return -EINVAL;
489
490 err = nla_parse_nested(stats, TIPC_NLA_STATS_MAX,
491 link[TIPC_NLA_LINK_STATS], NULL);
492 if (err)
493 return err;
471 494
472 name = (char *)TLV_DATA(msg->req); 495 name = (char *)TLV_DATA(msg->req);
473 if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0) 496 if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0)
@@ -569,12 +592,20 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
569{ 592{
570 struct nlattr *link[TIPC_NLA_LINK_MAX + 1]; 593 struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
571 struct tipc_link_info link_info; 594 struct tipc_link_info link_info;
595 int err;
572 596
573 nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], NULL); 597 if (!attrs[TIPC_NLA_LINK])
598 return -EINVAL;
599
600 err = nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK],
601 NULL);
602 if (err)
603 return err;
574 604
575 link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]); 605 link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
576 link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP])); 606 link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
577 strcpy(link_info.str, nla_data(link[TIPC_NLA_LINK_NAME])); 607 nla_strlcpy(link_info.str, nla_data(link[TIPC_NLA_LINK_NAME]),
608 TIPC_MAX_LINK_NAME);
578 609
579 return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO, 610 return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO,
580 &link_info, sizeof(link_info)); 611 &link_info, sizeof(link_info));
@@ -758,12 +789,23 @@ static int tipc_nl_compat_name_table_dump(struct tipc_nl_compat_msg *msg,
758 u32 node, depth, type, lowbound, upbound; 789 u32 node, depth, type, lowbound, upbound;
759 static const char * const scope_str[] = {"", " zone", " cluster", 790 static const char * const scope_str[] = {"", " zone", " cluster",
760 " node"}; 791 " node"};
792 int err;
761 793
762 nla_parse_nested(nt, TIPC_NLA_NAME_TABLE_MAX, 794 if (!attrs[TIPC_NLA_NAME_TABLE])
763 attrs[TIPC_NLA_NAME_TABLE], NULL); 795 return -EINVAL;
764 796
765 nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, nt[TIPC_NLA_NAME_TABLE_PUBL], 797 err = nla_parse_nested(nt, TIPC_NLA_NAME_TABLE_MAX,
766 NULL); 798 attrs[TIPC_NLA_NAME_TABLE], NULL);
799 if (err)
800 return err;
801
802 if (!nt[TIPC_NLA_NAME_TABLE_PUBL])
803 return -EINVAL;
804
805 err = nla_parse_nested(publ, TIPC_NLA_PUBL_MAX,
806 nt[TIPC_NLA_NAME_TABLE_PUBL], NULL);
807 if (err)
808 return err;
767 809
768 ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req); 810 ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
769 811
@@ -815,8 +857,15 @@ static int __tipc_nl_compat_publ_dump(struct tipc_nl_compat_msg *msg,
815{ 857{
816 u32 type, lower, upper; 858 u32 type, lower, upper;
817 struct nlattr *publ[TIPC_NLA_PUBL_MAX + 1]; 859 struct nlattr *publ[TIPC_NLA_PUBL_MAX + 1];
860 int err;
818 861
819 nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, attrs[TIPC_NLA_PUBL], NULL); 862 if (!attrs[TIPC_NLA_PUBL])
863 return -EINVAL;
864
865 err = nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, attrs[TIPC_NLA_PUBL],
866 NULL);
867 if (err)
868 return err;
820 869
821 type = nla_get_u32(publ[TIPC_NLA_PUBL_TYPE]); 870 type = nla_get_u32(publ[TIPC_NLA_PUBL_TYPE]);
822 lower = nla_get_u32(publ[TIPC_NLA_PUBL_LOWER]); 871 lower = nla_get_u32(publ[TIPC_NLA_PUBL_LOWER]);
@@ -876,7 +925,13 @@ static int tipc_nl_compat_sk_dump(struct tipc_nl_compat_msg *msg,
876 u32 sock_ref; 925 u32 sock_ref;
877 struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1]; 926 struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
878 927
879 nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, attrs[TIPC_NLA_SOCK], NULL); 928 if (!attrs[TIPC_NLA_SOCK])
929 return -EINVAL;
930
931 err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, attrs[TIPC_NLA_SOCK],
932 NULL);
933 if (err)
934 return err;
880 935
881 sock_ref = nla_get_u32(sock[TIPC_NLA_SOCK_REF]); 936 sock_ref = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
882 tipc_tlv_sprintf(msg->rep, "%u:", sock_ref); 937 tipc_tlv_sprintf(msg->rep, "%u:", sock_ref);
@@ -917,9 +972,15 @@ static int tipc_nl_compat_media_dump(struct tipc_nl_compat_msg *msg,
917 struct nlattr **attrs) 972 struct nlattr **attrs)
918{ 973{
919 struct nlattr *media[TIPC_NLA_MEDIA_MAX + 1]; 974 struct nlattr *media[TIPC_NLA_MEDIA_MAX + 1];
975 int err;
976
977 if (!attrs[TIPC_NLA_MEDIA])
978 return -EINVAL;
920 979
921 nla_parse_nested(media, TIPC_NLA_MEDIA_MAX, attrs[TIPC_NLA_MEDIA], 980 err = nla_parse_nested(media, TIPC_NLA_MEDIA_MAX, attrs[TIPC_NLA_MEDIA],
922 NULL); 981 NULL);
982 if (err)
983 return err;
923 984
924 return tipc_add_tlv(msg->rep, TIPC_TLV_MEDIA_NAME, 985 return tipc_add_tlv(msg->rep, TIPC_TLV_MEDIA_NAME,
925 nla_data(media[TIPC_NLA_MEDIA_NAME]), 986 nla_data(media[TIPC_NLA_MEDIA_NAME]),
@@ -931,8 +992,15 @@ static int tipc_nl_compat_node_dump(struct tipc_nl_compat_msg *msg,
931{ 992{
932 struct tipc_node_info node_info; 993 struct tipc_node_info node_info;
933 struct nlattr *node[TIPC_NLA_NODE_MAX + 1]; 994 struct nlattr *node[TIPC_NLA_NODE_MAX + 1];
995 int err;
934 996
935 nla_parse_nested(node, TIPC_NLA_NODE_MAX, attrs[TIPC_NLA_NODE], NULL); 997 if (!attrs[TIPC_NLA_NODE])
998 return -EINVAL;
999
1000 err = nla_parse_nested(node, TIPC_NLA_NODE_MAX, attrs[TIPC_NLA_NODE],
1001 NULL);
1002 if (err)
1003 return err;
936 1004
937 node_info.addr = htonl(nla_get_u32(node[TIPC_NLA_NODE_ADDR])); 1005 node_info.addr = htonl(nla_get_u32(node[TIPC_NLA_NODE_ADDR]));
938 node_info.up = htonl(nla_get_flag(node[TIPC_NLA_NODE_UP])); 1006 node_info.up = htonl(nla_get_flag(node[TIPC_NLA_NODE_UP]));
@@ -971,8 +1039,16 @@ static int tipc_nl_compat_net_dump(struct tipc_nl_compat_msg *msg,
971{ 1039{
972 __be32 id; 1040 __be32 id;
973 struct nlattr *net[TIPC_NLA_NET_MAX + 1]; 1041 struct nlattr *net[TIPC_NLA_NET_MAX + 1];
1042 int err;
1043
1044 if (!attrs[TIPC_NLA_NET])
1045 return -EINVAL;
1046
1047 err = nla_parse_nested(net, TIPC_NLA_NET_MAX, attrs[TIPC_NLA_NET],
1048 NULL);
1049 if (err)
1050 return err;
974 1051
975 nla_parse_nested(net, TIPC_NLA_NET_MAX, attrs[TIPC_NLA_NET], NULL);
976 id = htonl(nla_get_u32(net[TIPC_NLA_NET_ID])); 1052 id = htonl(nla_get_u32(net[TIPC_NLA_NET_ID]));
977 1053
978 return tipc_add_tlv(msg->rep, TIPC_TLV_UNSIGNED, &id, sizeof(id)); 1054 return tipc_add_tlv(msg->rep, TIPC_TLV_UNSIGNED, &id, sizeof(id));
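
Every tipc_nl_compat_*_dump() above now follows one defensive pattern: reject a missing top-level attribute with -EINVAL, and propagate nla_parse_nested() errors instead of reading from a possibly uninitialized attribute table. The link dump additionally swaps an unbounded strcpy() for nla_strlcpy(), since a netlink attribute payload need not be NUL-terminated. Semantics sketch of the bounded copy (lib/nlattr.c):

  /* Copies at most dstsize - 1 bytes of the attribute payload and always
   * NUL-terminates dst; link_info.str is TIPC_MAX_LINK_NAME bytes, so a
   * hostile attribute can no longer overrun it.
   */
  size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize);
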
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 80aa6a3e6817..735362c26c8e 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -315,7 +315,7 @@ static struct sock *unix_find_socket_byinode(struct inode *i)
315 &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) { 315 &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
316 struct dentry *dentry = unix_sk(s)->path.dentry; 316 struct dentry *dentry = unix_sk(s)->path.dentry;
317 317
318 if (dentry && d_backing_inode(dentry) == i) { 318 if (dentry && d_real_inode(dentry) == i) {
319 sock_hold(s); 319 sock_hold(s);
320 goto found; 320 goto found;
321 } 321 }
@@ -911,7 +911,7 @@ static struct sock *unix_find_other(struct net *net,
911 err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path); 911 err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
912 if (err) 912 if (err)
913 goto fail; 913 goto fail;
914 inode = d_backing_inode(path.dentry); 914 inode = d_real_inode(path.dentry);
915 err = inode_permission(inode, MAY_WRITE); 915 err = inode_permission(inode, MAY_WRITE);
916 if (err) 916 if (err)
917 goto put_fail; 917 goto put_fail;
@@ -1048,7 +1048,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1048 goto out_up; 1048 goto out_up;
1049 } 1049 }
1050 addr->hash = UNIX_HASH_SIZE; 1050 addr->hash = UNIX_HASH_SIZE;
1051 hash = d_backing_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1); 1051 hash = d_real_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
1052 spin_lock(&unix_table_lock); 1052 spin_lock(&unix_table_lock);
1053 u->path = u_path; 1053 u->path = u_path;
1054 list = &unix_socket_table[hash]; 1054 list = &unix_socket_table[hash];
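
The af_unix switch from d_backing_inode() to d_real_inode() makes filesystem-backed unix sockets work on overlayfs: bind(), connect(), and the by-inode lookup must all hash the same inode, and for an overlay path that has to be the inode of the real underlying dentry rather than the overlay's own. Rough shape of the helper relied on here (include/linux/dcache.h):

  static inline struct inode *d_real_inode(struct dentry *dentry)
  {
          /* resolve overlay/union indirection, then take that inode */
          return d_backing_inode(d_real(dentry));
  }
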
diff --git a/net/wireless/core.c b/net/wireless/core.c
index d25c82bc1bbe..ecca3896b9f7 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -363,8 +363,6 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv,
363 WARN_ON(ops->remain_on_channel && !ops->cancel_remain_on_channel); 363 WARN_ON(ops->remain_on_channel && !ops->cancel_remain_on_channel);
364 WARN_ON(ops->tdls_channel_switch && !ops->tdls_cancel_channel_switch); 364 WARN_ON(ops->tdls_channel_switch && !ops->tdls_cancel_channel_switch);
365 WARN_ON(ops->add_tx_ts && !ops->del_tx_ts); 365 WARN_ON(ops->add_tx_ts && !ops->del_tx_ts);
366 WARN_ON(ops->set_tx_power && !ops->get_tx_power);
367 WARN_ON(ops->set_antenna && !ops->get_antenna);
368 366
369 alloc_size = sizeof(*rdev) + sizeof_priv; 367 alloc_size = sizeof(*rdev) + sizeof_priv;
370 368
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index 6250b1cfcde5..dbb2738e356a 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -958,8 +958,29 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
958 return private(dev, iwr, cmd, info, handler); 958 return private(dev, iwr, cmd, info, handler);
959 } 959 }
960 /* Old driver API : call driver ioctl handler */ 960 /* Old driver API : call driver ioctl handler */
961 if (dev->netdev_ops->ndo_do_ioctl) 961 if (dev->netdev_ops->ndo_do_ioctl) {
962 return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd); 962#ifdef CONFIG_COMPAT
963 if (info->flags & IW_REQUEST_FLAG_COMPAT) {
964 int ret = 0;
965 struct iwreq iwr_lcl;
966 struct compat_iw_point *iwp_compat = (void *) &iwr->u.data;
967
968 memcpy(&iwr_lcl, iwr, sizeof(struct iwreq));
969 iwr_lcl.u.data.pointer = compat_ptr(iwp_compat->pointer);
970 iwr_lcl.u.data.length = iwp_compat->length;
971 iwr_lcl.u.data.flags = iwp_compat->flags;
972
973 ret = dev->netdev_ops->ndo_do_ioctl(dev, (void *) &iwr_lcl, cmd);
974
975 iwp_compat->pointer = ptr_to_compat(iwr_lcl.u.data.pointer);
976 iwp_compat->length = iwr_lcl.u.data.length;
977 iwp_compat->flags = iwr_lcl.u.data.flags;
978
979 return ret;
980 } else
981#endif
982 return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
983 }
963 return -EOPNOTSUPP; 984 return -EOPNOTSUPP;
964} 985}
965 986
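
The wext hunk fixes 32-bit ioctls that reach a 64-bit kernel's legacy ndo_do_ioctl handlers: struct iw_point starts with a user pointer, whose size differs under compat, so the request is unpacked into a native struct iwreq before the driver call and the possibly driver-updated pointer/length/flags are packed back afterwards. The 32-bit layout being translated (as declared for CONFIG_COMPAT builds):

  struct compat_iw_point {
          compat_caddr_t pointer;  /* 32-bit user pointer */
          __u16 length;
          __u16 flags;
  };
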
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 6750595bd7b8..4904ced676d4 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2454,6 +2454,7 @@ sub process {
2454 2454
2455# Check for git id commit length and improperly formed commit descriptions 2455# Check for git id commit length and improperly formed commit descriptions
2456 if ($in_commit_log && !$commit_log_possible_stack_dump && 2456 if ($in_commit_log && !$commit_log_possible_stack_dump &&
2457 $line !~ /^\s*(?:Link|Patchwork|http|BugLink):/i &&
2457 ($line =~ /\bcommit\s+[0-9a-f]{5,}\b/i || 2458 ($line =~ /\bcommit\s+[0-9a-f]{5,}\b/i ||
2458 ($line =~ /\b[0-9a-f]{12,40}\b/i && 2459 ($line =~ /\b[0-9a-f]{12,40}\b/i &&
2459 $line !~ /[\<\[][0-9a-f]{12,40}[\>\]]/i && 2460 $line !~ /[\<\[][0-9a-f]{12,40}[\>\]]/i &&
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index 2fc8fad5195e..27757c21551a 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -59,6 +59,12 @@ Output format selection (mutually exclusive):
59 -text Output plain text format. 59 -text Output plain text format.
60 60
61Output selection (mutually exclusive): 61Output selection (mutually exclusive):
62 -export Only output documentation for symbols that have been
63 exported using EXPORT_SYMBOL() or EXPORT_SYMBOL_GPL()
64 in the same FILE.
65 -internal Only output documentation for symbols that have NOT been
66 exported using EXPORT_SYMBOL() or EXPORT_SYMBOL_GPL()
67 in the same FILE.
62 -function NAME Only output documentation for the given function(s) 68 -function NAME Only output documentation for the given function(s)
63 or DOC: section title(s). All other functions and DOC: 69 or DOC: section title(s). All other functions and DOC:
64 sections are ignored. May be specified multiple times. 70 sections are ignored. May be specified multiple times.
@@ -68,6 +74,8 @@ Output selection (mutually exclusive):
68 74
69Output selection modifiers: 75Output selection modifiers:
70 -no-doc-sections Do not output DOC: sections. 76 -no-doc-sections Do not output DOC: sections.
77 -enable-lineno Enable output of #define LINENO lines. Only works with
78 reStructuredText format.
71 79
72Other parameters: 80Other parameters:
73 -v Verbose output, more warnings and other information. 81 -v Verbose output, more warnings and other information.
@@ -206,6 +214,10 @@ my $type_struct_xml = '\\&amp;((struct\s*)*[_\w]+)';
206my $type_env = '(\$\w+)'; 214my $type_env = '(\$\w+)';
207my $type_enum_full = '\&(enum)\s*([_\w]+)'; 215my $type_enum_full = '\&(enum)\s*([_\w]+)';
208my $type_struct_full = '\&(struct)\s*([_\w]+)'; 216my $type_struct_full = '\&(struct)\s*([_\w]+)';
217my $type_typedef_full = '\&(typedef)\s*([_\w]+)';
218my $type_union_full = '\&(union)\s*([_\w]+)';
219my $type_member = '\&([_\w]+)((\.|->)[_\w]+)';
220my $type_member_func = $type_member . '\(\)';
209 221
210# Output conversion substitutions. 222# Output conversion substitutions.
211# One for each output format 223# One for each output format
@@ -274,10 +286,16 @@ my $blankline_text = "";
274# rst-mode 286# rst-mode
275my @highlights_rst = ( 287my @highlights_rst = (
276 [$type_constant, "``\$1``"], 288 [$type_constant, "``\$1``"],
277 [$type_func, "\\:c\\:func\\:`\$1`"], 289 # Note: need to escape () to avoid func matching later
290 [$type_member_func, "\\:c\\:type\\:`\$1\$2\\\\(\\\\) <\$1>`"],
291 [$type_member, "\\:c\\:type\\:`\$1\$2 <\$1>`"],
292 [$type_func, "\\:c\\:func\\:`\$1()`"],
278 [$type_struct_full, "\\:c\\:type\\:`\$1 \$2 <\$2>`"], 293 [$type_struct_full, "\\:c\\:type\\:`\$1 \$2 <\$2>`"],
279 [$type_enum_full, "\\:c\\:type\\:`\$1 \$2 <\$2>`"], 294 [$type_enum_full, "\\:c\\:type\\:`\$1 \$2 <\$2>`"],
280 [$type_struct, "\\:c\\:type\\:`struct \$1 <\$1>`"], 295 [$type_typedef_full, "\\:c\\:type\\:`\$1 \$2 <\$2>`"],
296 [$type_union_full, "\\:c\\:type\\:`\$1 \$2 <\$2>`"],
297 # in rst this can refer to any type
298 [$type_struct, "\\:c\\:type\\:`\$1`"],
281 [$type_param, "**\$1**"] 299 [$type_param, "**\$1**"]
282 ); 300 );
283my $blankline_rst = "\n"; 301my $blankline_rst = "\n";
@@ -303,10 +321,19 @@ my $verbose = 0;
303my $output_mode = "man"; 321my $output_mode = "man";
304my $output_preformatted = 0; 322my $output_preformatted = 0;
305my $no_doc_sections = 0; 323my $no_doc_sections = 0;
324my $enable_lineno = 0;
306my @highlights = @highlights_man; 325my @highlights = @highlights_man;
307my $blankline = $blankline_man; 326my $blankline = $blankline_man;
308my $modulename = "Kernel API"; 327my $modulename = "Kernel API";
309my $function_only = 0; 328
329use constant {
330 OUTPUT_ALL => 0, # output all symbols and doc sections
331 OUTPUT_INCLUDE => 1, # output only specified symbols
332 OUTPUT_EXCLUDE => 2, # output everything except specified symbols
333 OUTPUT_EXPORTED => 3, # output exported symbols
334 OUTPUT_INTERNAL => 4, # output non-exported symbols
335};
336my $output_selection = OUTPUT_ALL;
310my $show_not_found = 0; 337my $show_not_found = 0;
311 338
312my @build_time; 339my @build_time;
@@ -327,6 +354,7 @@ my $man_date = ('January', 'February', 'March', 'April', 'May', 'June',
327# CAVEAT EMPTOR! Some of the others I localised may not want to be, which 354# CAVEAT EMPTOR! Some of the others I localised may not want to be, which
328# could cause "use of undefined value" or other bugs. 355# could cause "use of undefined value" or other bugs.
329my ($function, %function_table, %parametertypes, $declaration_purpose); 356my ($function, %function_table, %parametertypes, $declaration_purpose);
357my $declaration_start_line;
330my ($type, $declaration_name, $return_type); 358my ($type, $declaration_name, $return_type);
331my ($newsection, $newcontents, $prototype, $brcount, %source_map); 359my ($newsection, $newcontents, $prototype, $brcount, %source_map);
332 360
@@ -344,52 +372,62 @@ my $section_counter = 0;
344 372
345my $lineprefix=""; 373my $lineprefix="";
346 374
347# states 375# Parser states
348# 0 - normal code 376use constant {
349# 1 - looking for function name 377 STATE_NORMAL => 0, # normal code
350# 2 - scanning field start. 378 STATE_NAME => 1, # looking for function name
351# 3 - scanning prototype. 379 STATE_FIELD => 2, # scanning field start
352# 4 - documentation block 380 STATE_PROTO => 3, # scanning prototype
353# 5 - gathering documentation outside main block 381 STATE_DOCBLOCK => 4, # documentation block
382 STATE_INLINE => 5, # gathering documentation outside main block
383};
354my $state; 384my $state;
355my $in_doc_sect; 385my $in_doc_sect;
356 386
357# Split Doc State 387# Inline documentation state
358# 0 - Invalid (Before start or after finish) 388use constant {
359# 1 - Is started (the /** was found inside a struct) 389 STATE_INLINE_NA => 0, # not applicable ($state != STATE_INLINE)
360# 2 - The @parameter header was found, start accepting multi paragraph text. 390 STATE_INLINE_NAME => 1, # looking for member name (@foo:)
361# 3 - Finished (the */ was found) 391 STATE_INLINE_TEXT => 2, # looking for member documentation
362# 4 - Error - Comment without header was found. Spit a warning as it's not 392 STATE_INLINE_END => 3, # done
363# proper kernel-doc and ignore the rest. 393 STATE_INLINE_ERROR => 4, # error - Comment without header was found.
364my $split_doc_state; 394 # Spit a warning as it's not
395 # proper kernel-doc and ignore the rest.
396};
397my $inline_doc_state;
365 398
366#declaration types: can be 399#declaration types: can be
367# 'function', 'struct', 'union', 'enum', 'typedef' 400# 'function', 'struct', 'union', 'enum', 'typedef'
368my $decl_type; 401my $decl_type;
369 402
370my $doc_special = "\@\%\$\&";
371
372my $doc_start = '^/\*\*\s*$'; # Allow whitespace at end of comment start. 403my $doc_start = '^/\*\*\s*$'; # Allow whitespace at end of comment start.
373my $doc_end = '\*/'; 404my $doc_end = '\*/';
374my $doc_com = '\s*\*\s*'; 405my $doc_com = '\s*\*\s*';
375my $doc_com_body = '\s*\* ?'; 406my $doc_com_body = '\s*\* ?';
376my $doc_decl = $doc_com . '(\w+)'; 407my $doc_decl = $doc_com . '(\w+)';
377my $doc_sect = $doc_com . '([' . $doc_special . ']?[\w\s]+):(.*)'; 408# @params and a strictly limited set of supported section names
409my $doc_sect = $doc_com .
410 '\s*(\@\w+|description|context|returns?|notes?|examples?)\s*:(.*)';
378my $doc_content = $doc_com_body . '(.*)'; 411my $doc_content = $doc_com_body . '(.*)';
379my $doc_block = $doc_com . 'DOC:\s*(.*)?'; 412my $doc_block = $doc_com . 'DOC:\s*(.*)?';
380my $doc_split_start = '^\s*/\*\*\s*$'; 413my $doc_inline_start = '^\s*/\*\*\s*$';
381my $doc_split_sect = '\s*\*\s*(@[\w\s]+):(.*)'; 414my $doc_inline_sect = '\s*\*\s*(@[\w\s]+):(.*)';
382my $doc_split_end = '^\s*\*/\s*$'; 415my $doc_inline_end = '^\s*\*/\s*$';
416my $export_symbol = '^\s*EXPORT_SYMBOL(_GPL)?\s*\(\s*(\w+)\s*\)\s*;';
383 417
384my %constants;
385my %parameterdescs; 418my %parameterdescs;
419my %parameterdesc_start_lines;
386my @parameterlist; 420my @parameterlist;
387my %sections; 421my %sections;
388my @sectionlist; 422my @sectionlist;
423my %section_start_lines;
389my $sectcheck; 424my $sectcheck;
390my $struct_actual; 425my $struct_actual;
391 426
392my $contents = ""; 427my $contents = "";
428my $new_start_line = 0;
429
430# the canonical section names. see also $doc_sect above.
393my $section_default = "Description"; # default section 431my $section_default = "Description"; # default section
394my $section_intro = "Introduction"; 432my $section_intro = "Introduction";
395my $section = $section_default; 433my $section = $section_default;
@@ -437,19 +475,27 @@ while ($ARGV[0] =~ m/^-(.*)/) {
437 } elsif ($cmd eq "-module") { # not needed for XML, inherits from calling document 475 } elsif ($cmd eq "-module") { # not needed for XML, inherits from calling document
438 $modulename = shift @ARGV; 476 $modulename = shift @ARGV;
439 } elsif ($cmd eq "-function") { # to only output specific functions 477 } elsif ($cmd eq "-function") { # to only output specific functions
440 $function_only = 1; 478 $output_selection = OUTPUT_INCLUDE;
441 $function = shift @ARGV; 479 $function = shift @ARGV;
442 $function_table{$function} = 1; 480 $function_table{$function} = 1;
443 } elsif ($cmd eq "-nofunction") { # to only output specific functions 481 } elsif ($cmd eq "-nofunction") { # output all except specific functions
444 $function_only = 2; 482 $output_selection = OUTPUT_EXCLUDE;
445 $function = shift @ARGV; 483 $function = shift @ARGV;
446 $function_table{$function} = 1; 484 $function_table{$function} = 1;
485 } elsif ($cmd eq "-export") { # only exported symbols
486 $output_selection = OUTPUT_EXPORTED;
487 %function_table = ()
488 } elsif ($cmd eq "-internal") { # only non-exported symbols
489 $output_selection = OUTPUT_INTERNAL;
490 %function_table = ()
447 } elsif ($cmd eq "-v") { 491 } elsif ($cmd eq "-v") {
448 $verbose = 1; 492 $verbose = 1;
449 } elsif (($cmd eq "-h") || ($cmd eq "--help")) { 493 } elsif (($cmd eq "-h") || ($cmd eq "--help")) {
450 usage(); 494 usage();
451 } elsif ($cmd eq '-no-doc-sections') { 495 } elsif ($cmd eq '-no-doc-sections') {
452 $no_doc_sections = 1; 496 $no_doc_sections = 1;
497 } elsif ($cmd eq '-enable-lineno') {
498 $enable_lineno = 1;
453 } elsif ($cmd eq '-show-not-found') { 499 } elsif ($cmd eq '-show-not-found') {
454 $show_not_found = 1; 500 $show_not_found = 1;
455 } 501 }
@@ -467,6 +513,13 @@ sub get_kernel_version() {
467 return $version; 513 return $version;
468} 514}
469 515
516#
517sub print_lineno {
518 my $lineno = shift;
519 if ($enable_lineno && defined($lineno)) {
520 print "#define LINENO " . $lineno . "\n";
521 }
522}
470## 523##
471# dumps section contents to arrays/hashes intended for that purpose. 524# dumps section contents to arrays/hashes intended for that purpose.
472# 525#
@@ -475,28 +528,32 @@ sub dump_section {
475 my $name = shift; 528 my $name = shift;
476 my $contents = join "\n", @_; 529 my $contents = join "\n", @_;
477 530
478 if ($name =~ m/$type_constant/) { 531 if ($name =~ m/$type_param/) {
479 $name = $1;
480# print STDERR "constant section '$1' = '$contents'\n";
481 $constants{$name} = $contents;
482 } elsif ($name =~ m/$type_param/) {
483# print STDERR "parameter def '$1' = '$contents'\n"; 532# print STDERR "parameter def '$1' = '$contents'\n";
484 $name = $1; 533 $name = $1;
485 $parameterdescs{$name} = $contents; 534 $parameterdescs{$name} = $contents;
486 $sectcheck = $sectcheck . $name . " "; 535 $sectcheck = $sectcheck . $name . " ";
536 $parameterdesc_start_lines{$name} = $new_start_line;
537 $new_start_line = 0;
487 } elsif ($name eq "@\.\.\.") { 538 } elsif ($name eq "@\.\.\.") {
488# print STDERR "parameter def '...' = '$contents'\n"; 539# print STDERR "parameter def '...' = '$contents'\n";
489 $name = "..."; 540 $name = "...";
490 $parameterdescs{$name} = $contents; 541 $parameterdescs{$name} = $contents;
491 $sectcheck = $sectcheck . $name . " "; 542 $sectcheck = $sectcheck . $name . " ";
543 $parameterdesc_start_lines{$name} = $new_start_line;
544 $new_start_line = 0;
492 } else { 545 } else {
493# print STDERR "other section '$name' = '$contents'\n"; 546# print STDERR "other section '$name' = '$contents'\n";
494 if (defined($sections{$name}) && ($sections{$name} ne "")) { 547 if (defined($sections{$name}) && ($sections{$name} ne "")) {
495 print STDERR "${file}:$.: error: duplicate section name '$name'\n"; 548 print STDERR "${file}:$.: warning: duplicate section name '$name'\n";
496 ++$errors; 549 ++$warnings;
550 $sections{$name} .= $contents;
551 } else {
552 $sections{$name} = $contents;
553 push @sectionlist, $name;
554 $section_start_lines{$name} = $new_start_line;
555 $new_start_line = 0;
497 } 556 }
498 $sections{$name} = $contents;
499 push @sectionlist, $name;
500 } 557 }
501} 558}
502 559
@@ -512,15 +569,17 @@ sub dump_doc_section {
512 return; 569 return;
513 } 570 }
514 571
515 if (($function_only == 0) || 572 if (($output_selection == OUTPUT_ALL) ||
516 ( $function_only == 1 && defined($function_table{$name})) || 573 ($output_selection == OUTPUT_INCLUDE &&
517 ( $function_only == 2 && !defined($function_table{$name}))) 574 defined($function_table{$name})) ||
575 ($output_selection == OUTPUT_EXCLUDE &&
576 !defined($function_table{$name})))
518 { 577 {
519 dump_section($file, $name, $contents); 578 dump_section($file, $name, $contents);
520 output_blockhead({'sectionlist' => \@sectionlist, 579 output_blockhead({'sectionlist' => \@sectionlist,
521 'sections' => \%sections, 580 'sections' => \%sections,
522 'module' => $modulename, 581 'module' => $modulename,
523 'content-only' => ($function_only != 0), }); 582 'content-only' => ($output_selection != OUTPUT_ALL), });
524 } 583 }
525} 584}
526 585
@@ -1736,7 +1795,10 @@ sub output_blockhead_rst(%) {
1736 my ($parameter, $section); 1795 my ($parameter, $section);
1737 1796
1738 foreach $section (@{$args{'sectionlist'}}) { 1797 foreach $section (@{$args{'sectionlist'}}) {
1739 print "**$section**\n\n"; 1798 if ($output_selection != OUTPUT_INCLUDE) {
1799 print "**$section**\n\n";
1800 }
1801 print_lineno($section_start_lines{$section});
1740 output_highlight_rst($args{'sections'}{$section}); 1802 output_highlight_rst($args{'sections'}{$section});
1741 print "\n"; 1803 print "\n";
1742 } 1804 }
@@ -1753,19 +1815,14 @@ sub output_highlight_rst {
1753 die $@ if $@; 1815 die $@ if $@;
1754 1816
1755 foreach $line (split "\n", $contents) { 1817 foreach $line (split "\n", $contents) {
1756 if ($line eq "") { 1818 print $lineprefix . $line . "\n";
1757 print $lineprefix, $blankline;
1758 } else {
1759 $line =~ s/\\\\\\/\&/g;
1760 print $lineprefix, $line;
1761 }
1762 print "\n";
1763 } 1819 }
1764} 1820}
1765 1821
1766sub output_function_rst(%) { 1822sub output_function_rst(%) {
1767 my %args = %{$_[0]}; 1823 my %args = %{$_[0]};
1768 my ($parameter, $section); 1824 my ($parameter, $section);
1825 my $oldprefix = $lineprefix;
1769 my $start; 1826 my $start;
1770 1827
1771 print ".. c:function:: "; 1828 print ".. c:function:: ";
@@ -1790,29 +1847,37 @@ sub output_function_rst(%) {
1790 print $type . " " . $parameter; 1847 print $type . " " . $parameter;
1791 } 1848 }
1792 } 1849 }
1793 print ")\n\n " . $args{'purpose'} . "\n\n"; 1850 print ")\n\n";
1851 print_lineno($declaration_start_line);
1852 $lineprefix = " ";
1853 output_highlight_rst($args{'purpose'});
1854 print "\n";
1794 1855
1795 print ":Parameters:\n\n"; 1856 print "**Parameters**\n\n";
1857 $lineprefix = " ";
1796 foreach $parameter (@{$args{'parameterlist'}}) { 1858 foreach $parameter (@{$args{'parameterlist'}}) {
1797 my $parameter_name = $parameter; 1859 my $parameter_name = $parameter;
1798 #$parameter_name =~ s/\[.*//; 1860 #$parameter_name =~ s/\[.*//;
1799 $type = $args{'parametertypes'}{$parameter}; 1861 $type = $args{'parametertypes'}{$parameter};
1800 1862
1801 if ($type ne "") { 1863 if ($type ne "") {
1802 print " ``$type $parameter``\n"; 1864 print "``$type $parameter``\n";
1803 } else { 1865 } else {
1804 print " ``$parameter``\n"; 1866 print "``$parameter``\n";
1805 } 1867 }
1806 if ($args{'parameterdescs'}{$parameter_name} ne $undescribed) { 1868
1807 my $oldprefix = $lineprefix; 1869 print_lineno($parameterdesc_start_lines{$parameter_name});
1808 $lineprefix = " "; 1870
1871 if (defined($args{'parameterdescs'}{$parameter_name}) &&
1872 $args{'parameterdescs'}{$parameter_name} ne $undescribed) {
1809 output_highlight_rst($args{'parameterdescs'}{$parameter_name}); 1873 output_highlight_rst($args{'parameterdescs'}{$parameter_name});
1810 $lineprefix = $oldprefix;
1811 } else { 1874 } else {
1812 print "\n _undescribed_\n"; 1875 print " *undescribed*\n";
1813 } 1876 }
1814 print "\n"; 1877 print "\n";
1815 } 1878 }
1879
1880 $lineprefix = $oldprefix;
1816 output_section_rst(@_); 1881 output_section_rst(@_);
1817} 1882}
1818 1883
@@ -1820,10 +1885,11 @@ sub output_section_rst(%) {
1820 my %args = %{$_[0]}; 1885 my %args = %{$_[0]};
1821 my $section; 1886 my $section;
1822 my $oldprefix = $lineprefix; 1887 my $oldprefix = $lineprefix;
1823 $lineprefix = " "; 1888 $lineprefix = "";
1824 1889
1825 foreach $section (@{$args{'sectionlist'}}) { 1890 foreach $section (@{$args{'sectionlist'}}) {
1826 print ":$section:\n\n"; 1891 print "**$section**\n\n";
1892 print_lineno($section_start_lines{$section});
1827 output_highlight_rst($args{'sections'}{$section}); 1893 output_highlight_rst($args{'sections'}{$section});
1828 print "\n"; 1894 print "\n";
1829 } 1895 }
@@ -1834,24 +1900,28 @@ sub output_section_rst(%) {
1834sub output_enum_rst(%) { 1900sub output_enum_rst(%) {
1835 my %args = %{$_[0]}; 1901 my %args = %{$_[0]};
1836 my ($parameter); 1902 my ($parameter);
1903 my $oldprefix = $lineprefix;
1837 my $count; 1904 my $count;
1838 my $name = "enum " . $args{'enum'}; 1905 my $name = "enum " . $args{'enum'};
1839 1906
1840 print "\n\n.. c:type:: " . $name . "\n\n"; 1907 print "\n\n.. c:type:: " . $name . "\n\n";
1841 print " " . $args{'purpose'} . "\n\n"; 1908 print_lineno($declaration_start_line);
1909 $lineprefix = " ";
1910 output_highlight_rst($args{'purpose'});
1911 print "\n";
1842 1912
1843 print "..\n\n:Constants:\n\n"; 1913 print "**Constants**\n\n";
1844 my $oldprefix = $lineprefix; 1914 $lineprefix = " ";
1845 $lineprefix = " ";
1846 foreach $parameter (@{$args{'parameterlist'}}) { 1915 foreach $parameter (@{$args{'parameterlist'}}) {
1847 print " `$parameter`\n"; 1916 print "``$parameter``\n";
1848 if ($args{'parameterdescs'}{$parameter} ne $undescribed) { 1917 if ($args{'parameterdescs'}{$parameter} ne $undescribed) {
1849 output_highlight_rst($args{'parameterdescs'}{$parameter}); 1918 output_highlight_rst($args{'parameterdescs'}{$parameter});
1850 } else { 1919 } else {
1851 print " undescribed\n"; 1920 print " *undescribed*\n";
1852 } 1921 }
1853 print "\n"; 1922 print "\n";
1854 } 1923 }
1924
1855 $lineprefix = $oldprefix; 1925 $lineprefix = $oldprefix;
1856 output_section_rst(@_); 1926 output_section_rst(@_);
1857} 1927}
@@ -1859,30 +1929,37 @@ sub output_enum_rst(%) {
1859sub output_typedef_rst(%) { 1929sub output_typedef_rst(%) {
1860 my %args = %{$_[0]}; 1930 my %args = %{$_[0]};
1861 my ($parameter); 1931 my ($parameter);
1862 my $count; 1932 my $oldprefix = $lineprefix;
1863 my $name = "typedef " . $args{'typedef'}; 1933 my $name = "typedef " . $args{'typedef'};
1864 1934
1865 ### FIXME: should the name below contain "typedef" or not?
1866 print "\n\n.. c:type:: " . $name . "\n\n"; 1935 print "\n\n.. c:type:: " . $name . "\n\n";
1867 print " " . $args{'purpose'} . "\n\n"; 1936 print_lineno($declaration_start_line);
1937 $lineprefix = " ";
1938 output_highlight_rst($args{'purpose'});
1939 print "\n";
1868 1940
1941 $lineprefix = $oldprefix;
1869 output_section_rst(@_); 1942 output_section_rst(@_);
1870} 1943}
1871 1944
1872sub output_struct_rst(%) { 1945sub output_struct_rst(%) {
1873 my %args = %{$_[0]}; 1946 my %args = %{$_[0]};
1874 my ($parameter); 1947 my ($parameter);
1948 my $oldprefix = $lineprefix;
1875 my $name = $args{'type'} . " " . $args{'struct'}; 1949 my $name = $args{'type'} . " " . $args{'struct'};
1876 1950
1877 print "\n\n.. c:type:: " . $name . "\n\n"; 1951 print "\n\n.. c:type:: " . $name . "\n\n";
1878 print " " . $args{'purpose'} . "\n\n"; 1952 print_lineno($declaration_start_line);
1953 $lineprefix = " ";
1954 output_highlight_rst($args{'purpose'});
1955 print "\n";
1879 1956
1880 print ":Definition:\n\n"; 1957 print "**Definition**\n\n";
1881 print " ::\n\n"; 1958 print "::\n\n";
1882 print " " . $args{'type'} . " " . $args{'struct'} . " {\n"; 1959 print " " . $args{'type'} . " " . $args{'struct'} . " {\n";
1883 foreach $parameter (@{$args{'parameterlist'}}) { 1960 foreach $parameter (@{$args{'parameterlist'}}) {
1884 if ($parameter =~ /^#/) { 1961 if ($parameter =~ /^#/) {
1885 print " " . "$parameter\n"; 1962 print " " . "$parameter\n";
1886 next; 1963 next;
1887 } 1964 }
1888 1965
@@ -1903,7 +1980,8 @@ sub output_struct_rst(%) {
1903 } 1980 }
1904 print " };\n\n"; 1981 print " };\n\n";
1905 1982
1906 print ":Members:\n\n"; 1983 print "**Members**\n\n";
1984 $lineprefix = " ";
1907 foreach $parameter (@{$args{'parameterlist'}}) { 1985 foreach $parameter (@{$args{'parameterlist'}}) {
1908 ($parameter =~ /^#/) && next; 1986 ($parameter =~ /^#/) && next;
1909 1987
@@ -1912,14 +1990,14 @@ sub output_struct_rst(%) {
1912 1990
1913 ($args{'parameterdescs'}{$parameter_name} ne $undescribed) || next; 1991 ($args{'parameterdescs'}{$parameter_name} ne $undescribed) || next;
1914 $type = $args{'parametertypes'}{$parameter}; 1992 $type = $args{'parametertypes'}{$parameter};
1915 print " `$type $parameter`" . "\n"; 1993 print_lineno($parameterdesc_start_lines{$parameter_name});
1916 my $oldprefix = $lineprefix; 1994 print "``$type $parameter``\n";
1917 $lineprefix = " ";
1918 output_highlight_rst($args{'parameterdescs'}{$parameter_name}); 1995 output_highlight_rst($args{'parameterdescs'}{$parameter_name});
1919 $lineprefix = $oldprefix;
1920 print "\n"; 1996 print "\n";
1921 } 1997 }
1922 print "\n"; 1998 print "\n";
1999
2000 $lineprefix = $oldprefix;
1923 output_section_rst(@_); 2001 output_section_rst(@_);
1924} 2002}
1925 2003
@@ -1969,9 +2047,13 @@ sub output_declaration {
1969 my $name = shift; 2047 my $name = shift;
1970 my $functype = shift; 2048 my $functype = shift;
1971 my $func = "output_${functype}_$output_mode"; 2049 my $func = "output_${functype}_$output_mode";
1972 if (($function_only==0) || 2050 if (($output_selection == OUTPUT_ALL) ||
1973 ( $function_only == 1 && defined($function_table{$name})) || 2051 (($output_selection == OUTPUT_INCLUDE ||
1974 ( $function_only == 2 && !($functype eq "function" && defined($function_table{$name})))) 2052 $output_selection == OUTPUT_EXPORTED) &&
2053 defined($function_table{$name})) ||
2054 (($output_selection == OUTPUT_EXCLUDE ||
2055 $output_selection == OUTPUT_INTERNAL) &&
2056 !($functype eq "function" && defined($function_table{$name}))))
1975 { 2057 {
1976 &$func(@_); 2058 &$func(@_);
1977 $section_counter++; 2059 $section_counter++;
@@ -2471,7 +2553,6 @@ sub dump_function($$) {
2471 2553
2472sub reset_state { 2554sub reset_state {
2473 $function = ""; 2555 $function = "";
2474 %constants = ();
2475 %parameterdescs = (); 2556 %parameterdescs = ();
2476 %parametertypes = (); 2557 %parametertypes = ();
2477 @parameterlist = (); 2558 @parameterlist = ();
@@ -2481,8 +2562,8 @@ sub reset_state {
2481 $struct_actual = ""; 2562 $struct_actual = "";
2482 $prototype = ""; 2563 $prototype = "";
2483 2564
2484 $state = 0; 2565 $state = STATE_NORMAL;
2485 $split_doc_state = 0; 2566 $inline_doc_state = STATE_INLINE_NA;
2486} 2567}
2487 2568
2488sub tracepoint_munge($) { 2569sub tracepoint_munge($) {
@@ -2545,7 +2626,7 @@ sub syscall_munge() {
2545 } 2626 }
2546} 2627}
2547 2628
2548sub process_state3_function($$) { 2629sub process_proto_function($$) {
2549 my $x = shift; 2630 my $x = shift;
2550 my $file = shift; 2631 my $file = shift;
2551 2632
@@ -2575,7 +2656,7 @@ sub process_state3_function($$) {
2575 } 2656 }
2576} 2657}
2577 2658
2578sub process_state3_type($$) { 2659sub process_proto_type($$) {
2579 my $x = shift; 2660 my $x = shift;
2580 my $file = shift; 2661 my $file = shift;
2581 2662
@@ -2657,6 +2738,7 @@ sub process_file($) {
2657 my $in_purpose = 0; 2738 my $in_purpose = 0;
2658 my $initial_section_counter = $section_counter; 2739 my $initial_section_counter = $section_counter;
2659 my ($orig_file) = @_; 2740 my ($orig_file) = @_;
2741 my $leading_space;
2660 2742
2661 if (defined($ENV{'SRCTREE'})) { 2743 if (defined($ENV{'SRCTREE'})) {
2662 $file = "$ENV{'SRCTREE'}" . "/" . $orig_file; 2744 $file = "$ENV{'SRCTREE'}" . "/" . $orig_file;
@@ -2674,6 +2756,17 @@ sub process_file($) {
2674 return; 2756 return;
2675 } 2757 }
2676 2758
2759 # two passes for -export and -internal
2760 if ($output_selection == OUTPUT_EXPORTED ||
2761 $output_selection == OUTPUT_INTERNAL) {
2762 while (<IN>) {
2763 if (/$export_symbol/o) {
2764 $function_table{$2} = 1;
2765 }
2766 }
2767 seek(IN, 0, 0);
2768 }
2769
2677 $. = 1; 2770 $. = 1;
2678 2771
2679 $section_counter = 0; 2772 $section_counter = 0;
@@ -2681,15 +2774,18 @@ sub process_file($) {
2681 while (s/\\\s*$//) { 2774 while (s/\\\s*$//) {
2682 $_ .= <IN>; 2775 $_ .= <IN>;
2683 } 2776 }
2684 if ($state == 0) { 2777 if ($state == STATE_NORMAL) {
2685 if (/$doc_start/o) { 2778 if (/$doc_start/o) {
2686 $state = 1; # next line is always the function name 2779 $state = STATE_NAME; # next line is always the function name
2687 $in_doc_sect = 0; 2780 $in_doc_sect = 0;
2781 $declaration_start_line = $. + 1;
2688 } 2782 }
2689 } elsif ($state == 1) { # this line is the function name (always) 2783 } elsif ($state == STATE_NAME) {# this line is the function name (always)
2690 if (/$doc_block/o) { 2784 if (/$doc_block/o) {
2691 $state = 4; 2785 $state = STATE_DOCBLOCK;
2692 $contents = ""; 2786 $contents = "";
2787 $new_start_line = $. + 1;
2788
2693 if ( $1 eq "" ) { 2789 if ( $1 eq "" ) {
2694 $section = $section_intro; 2790 $section = $section_intro;
2695 } else { 2791 } else {
@@ -2702,7 +2798,12 @@ sub process_file($) {
2702 $identifier = $1; 2798 $identifier = $1;
2703 } 2799 }
2704 2800
2705 $state = 2; 2801 $state = STATE_FIELD;
2802 # if there's no @param blocks need to set up default section
2803 # here
2804 $contents = "";
2805 $section = $section_default;
2806 $new_start_line = $. + 1;
2706 if (/-(.*)/) { 2807 if (/-(.*)/) {
2707 # strip leading/trailing/multiple spaces 2808 # strip leading/trailing/multiple spaces
2708 $descr= $1; 2809 $descr= $1;
@@ -2740,13 +2841,25 @@ sub process_file($) {
2740 print STDERR "${file}:$.: warning: Cannot understand $_ on line $.", 2841 print STDERR "${file}:$.: warning: Cannot understand $_ on line $.",
2741 " - I thought it was a doc line\n"; 2842 " - I thought it was a doc line\n";
2742 ++$warnings; 2843 ++$warnings;
2743 $state = 0; 2844 $state = STATE_NORMAL;
2744 } 2845 }
2745 } elsif ($state == 2) { # look for head: lines, and include content 2846 } elsif ($state == STATE_FIELD) { # look for head: lines, and include content
2746 if (/$doc_sect/o) { 2847 if (/$doc_sect/i) { # case insensitive for supported section names
2747 $newsection = $1; 2848 $newsection = $1;
2748 $newcontents = $2; 2849 $newcontents = $2;
2749 2850
2851 # map the supported section names to the canonical names
2852 if ($newsection =~ m/^description$/i) {
2853 $newsection = $section_default;
2854 } elsif ($newsection =~ m/^context$/i) {
2855 $newsection = $section_context;
2856 } elsif ($newsection =~ m/^returns?$/i) {
2857 $newsection = $section_return;
2858 } elsif ($newsection =~ m/^\@return$/) {
2859 # special: @return is a section, not a param description
2860 $newsection = $section_return;
2861 }
2862
2750 if (($contents ne "") && ($contents ne "\n")) { 2863 if (($contents ne "") && ($contents ne "\n")) {
2751 if (!$in_doc_sect && $verbose) { 2864 if (!$in_doc_sect && $verbose) {
2752 print STDERR "${file}:$.: warning: contents before sections\n"; 2865 print STDERR "${file}:$.: warning: contents before sections\n";
@@ -2759,14 +2872,16 @@ sub process_file($) {
2759 $in_doc_sect = 1; 2872 $in_doc_sect = 1;
2760 $in_purpose = 0; 2873 $in_purpose = 0;
2761 $contents = $newcontents; 2874 $contents = $newcontents;
2875 $new_start_line = $.;
2876 while ((substr($contents, 0, 1) eq " ") ||
2877 substr($contents, 0, 1) eq "\t") {
2878 $contents = substr($contents, 1);
2879 }
2762 if ($contents ne "") { 2880 if ($contents ne "") {
2763 while ((substr($contents, 0, 1) eq " ") ||
2764 substr($contents, 0, 1) eq "\t") {
2765 $contents = substr($contents, 1);
2766 }
2767 $contents .= "\n"; 2881 $contents .= "\n";
2768 } 2882 }
2769 $section = $newsection; 2883 $section = $newsection;
2884 $leading_space = undef;
2770 } elsif (/$doc_end/) { 2885 } elsif (/$doc_end/) {
2771 if (($contents ne "") && ($contents ne "\n")) { 2886 if (($contents ne "") && ($contents ne "\n")) {
2772 dump_section($file, $section, xml_escape($contents)); 2887 dump_section($file, $section, xml_escape($contents));
@@ -2780,7 +2895,7 @@ sub process_file($) {
2780 } 2895 }
2781 2896
2782 $prototype = ""; 2897 $prototype = "";
2783 $state = 3; 2898 $state = STATE_PROTO;
2784 $brcount = 0; 2899 $brcount = 0;
2785# print STDERR "end of doc comment, looking for prototype\n"; 2900# print STDERR "end of doc comment, looking for prototype\n";
2786 } elsif (/$doc_content/) { 2901 } elsif (/$doc_content/) {
@@ -2791,6 +2906,7 @@ sub process_file($) {
2791 dump_section($file, $section, xml_escape($contents)); 2906 dump_section($file, $section, xml_escape($contents));
2792 $section = $section_default; 2907 $section = $section_default;
2793 $contents = ""; 2908 $contents = "";
2909 $new_start_line = $.;
2794 } else { 2910 } else {
2795 $contents .= "\n"; 2911 $contents .= "\n";
2796 } 2912 }
@@ -2801,87 +2917,86 @@ sub process_file($) {
2801 $declaration_purpose .= " " . xml_escape($1); 2917 $declaration_purpose .= " " . xml_escape($1);
2802 $declaration_purpose =~ s/\s+/ /g; 2918 $declaration_purpose =~ s/\s+/ /g;
2803 } else { 2919 } else {
2804 $contents .= $1 . "\n"; 2920 my $cont = $1;
2921 if ($section =~ m/^@/ || $section eq $section_context) {
2922 if (!defined $leading_space) {
2923 if ($cont =~ m/^(\s+)/) {
2924 $leading_space = $1;
2925 } else {
2926 $leading_space = "";
2927 }
2928 }
2929
2930 $cont =~ s/^$leading_space//;
2931 }
2932 $contents .= $cont . "\n";
2805 } 2933 }
2806 } else { 2934 } else {
2807 # I don't know - bad line? Ignore. 2935 # I don't know - bad line? Ignore.
2808 print STDERR "${file}:$.: warning: bad line: $_"; 2936 print STDERR "${file}:$.: warning: bad line: $_";
2809 ++$warnings; 2937 ++$warnings;
2810 } 2938 }
2811 } elsif ($state == 5) { # scanning for split parameters 2939 } elsif ($state == STATE_INLINE) { # scanning for inline parameters
2812 # First line (state 1) needs to be a @parameter 2940 # First line (state 1) needs to be a @parameter
2813 if ($split_doc_state == 1 && /$doc_split_sect/o) { 2941 if ($inline_doc_state == STATE_INLINE_NAME && /$doc_inline_sect/o) {
2814 $section = $1; 2942 $section = $1;
2815 $contents = $2; 2943 $contents = $2;
2944 $new_start_line = $.;
2816 if ($contents ne "") { 2945 if ($contents ne "") {
2817 while ((substr($contents, 0, 1) eq " ") || 2946 while ((substr($contents, 0, 1) eq " ") ||
2818 substr($contents, 0, 1) eq "\t") { 2947 substr($contents, 0, 1) eq "\t") {
2819 $contents = substr($contents, 1); 2948 $contents = substr($contents, 1);
2820 } 2949 }
2821 $contents .= "\n"; 2950 $contents .= "\n";
2822 } 2951 }
2823 $split_doc_state = 2; 2952 $inline_doc_state = STATE_INLINE_TEXT;
2824 # Documentation block end */ 2953 # Documentation block end */
2825 } elsif (/$doc_split_end/) { 2954 } elsif (/$doc_inline_end/) {
2826 if (($contents ne "") && ($contents ne "\n")) { 2955 if (($contents ne "") && ($contents ne "\n")) {
2827 dump_section($file, $section, xml_escape($contents)); 2956 dump_section($file, $section, xml_escape($contents));
2828 $section = $section_default; 2957 $section = $section_default;
2829 $contents = ""; 2958 $contents = "";
2830 } 2959 }
2831 $state = 3; 2960 $state = STATE_PROTO;
2832 $split_doc_state = 0; 2961 $inline_doc_state = STATE_INLINE_NA;
2833 # Regular text 2962 # Regular text
2834 } elsif (/$doc_content/) { 2963 } elsif (/$doc_content/) {
2835 if ($split_doc_state == 2) { 2964 if ($inline_doc_state == STATE_INLINE_TEXT) {
2836 $contents .= $1 . "\n"; 2965 $contents .= $1 . "\n";
2837 } elsif ($split_doc_state == 1) { 2966 # nuke leading blank lines
2838 $split_doc_state = 4; 2967 if ($contents =~ /^\s*$/) {
2968 $contents = "";
2969 }
2970 } elsif ($inline_doc_state == STATE_INLINE_NAME) {
2971 $inline_doc_state = STATE_INLINE_ERROR;
2839 print STDERR "Warning(${file}:$.): "; 2972 print STDERR "Warning(${file}:$.): ";
2840 print STDERR "Incorrect use of kernel-doc format: $_"; 2973 print STDERR "Incorrect use of kernel-doc format: $_";
2841 ++$warnings; 2974 ++$warnings;
2842 } 2975 }
2843 } 2976 }
2844 } elsif ($state == 3) { # scanning for function '{' (end of prototype) 2977 } elsif ($state == STATE_PROTO) { # scanning for function '{' (end of prototype)
2845 if (/$doc_split_start/) { 2978 if (/$doc_inline_start/) {
2846 $state = 5; 2979 $state = STATE_INLINE;
2847 $split_doc_state = 1; 2980 $inline_doc_state = STATE_INLINE_NAME;
2848 } elsif ($decl_type eq 'function') { 2981 } elsif ($decl_type eq 'function') {
2849 process_state3_function($_, $file); 2982 process_proto_function($_, $file);
2850 } else { 2983 } else {
2851 process_state3_type($_, $file); 2984 process_proto_type($_, $file);
2852 } 2985 }
2853 } elsif ($state == 4) { 2986 } elsif ($state == STATE_DOCBLOCK) {
2854 # Documentation block 2987 if (/$doc_end/)
2855 if (/$doc_block/) {
2856 dump_doc_section($file, $section, xml_escape($contents));
2857 $contents = "";
2858 $function = "";
2859 %constants = ();
2860 %parameterdescs = ();
2861 %parametertypes = ();
2862 @parameterlist = ();
2863 %sections = ();
2864 @sectionlist = ();
2865 $prototype = "";
2866 if ( $1 eq "" ) {
2867 $section = $section_intro;
2868 } else {
2869 $section = $1;
2870 }
2871 }
2872 elsif (/$doc_end/)
2873 { 2988 {
2874 dump_doc_section($file, $section, xml_escape($contents)); 2989 dump_doc_section($file, $section, xml_escape($contents));
2990 $section = $section_default;
2875 $contents = ""; 2991 $contents = "";
2876 $function = ""; 2992 $function = "";
2877 %constants = ();
2878 %parameterdescs = (); 2993 %parameterdescs = ();
2879 %parametertypes = (); 2994 %parametertypes = ();
2880 @parameterlist = (); 2995 @parameterlist = ();
2881 %sections = (); 2996 %sections = ();
2882 @sectionlist = (); 2997 @sectionlist = ();
2883 $prototype = ""; 2998 $prototype = "";
2884 $state = 0; 2999 $state = STATE_NORMAL;
2885 } 3000 }
2886 elsif (/$doc_content/) 3001 elsif (/$doc_content/)
2887 { 3002 {
@@ -2898,7 +3013,7 @@ sub process_file($) {
2898 } 3013 }
2899 if ($initial_section_counter == $section_counter) { 3014 if ($initial_section_counter == $section_counter) {
2900 print STDERR "${file}:1: warning: no structured comments found\n"; 3015 print STDERR "${file}:1: warning: no structured comments found\n";
2901 if (($function_only == 1) && ($show_not_found == 1)) { 3016 if (($output_selection == OUTPUT_INCLUDE) && ($show_not_found == 1)) {
2902 print STDERR " Was looking for '$_'.\n" for keys %function_table; 3017 print STDERR " Was looking for '$_'.\n" for keys %function_table;
2903 } 3018 }
2904 if ($output_mode eq "xml") { 3019 if ($output_mode eq "xml") {
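A note on the kernel-doc.pl hunks above: the change is largely mechanical, replacing the scanner's bare numeric states (0, 2, 3, 4, 5) with named constants (STATE_NORMAL, STATE_FIELD, STATE_PROTO, STATE_DOCBLOCK, STATE_INLINE) and renaming split_doc_* to inline_doc_*. A minimal C sketch of the same refactoring pattern; the state names here simply mirror the script's and are not taken from it:

    #include <stdio.h>

    /* Named states instead of magic numbers: a typo becomes a
     * compile error, and dispatch code documents itself. */
    enum parse_state {
        STATE_NORMAL,   /* plain source text */
        STATE_FIELD,    /* inside a doc-comment section */
        STATE_PROTO,    /* scanning for the prototype */
        STATE_DOCBLOCK, /* free-form documentation block */
        STATE_INLINE,   /* inline member comment */
    };

    int main(void)
    {
        enum parse_state state = STATE_NORMAL;

        switch (state) {
        case STATE_NORMAL:
            puts("looking for a doc comment");
            break;
        default:
            break;
        }
        return 0;
    }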
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index a9155077feef..fec75786f75b 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -384,7 +384,7 @@ static void do_of_entry_multi(void *symval, struct module *mod)
384 len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*", 384 len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*",
385 (*type)[0] ? *type : "*"); 385 (*type)[0] ? *type : "*");
386 386
387 if (compatible[0]) 387 if ((*compatible)[0])
388 sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "", 388 sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "",
389 *compatible); 389 *compatible);
390 390
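The one-character file2alias.c fix deserves a gloss: here `compatible` is a pointer to a fixed-size char array, so `compatible[0]` names the array itself, which as a condition decays to an always-non-null pointer; only `(*compatible)[0]` tests the first byte. A self-contained illustration (variable names hypothetical):

    #include <stdio.h>

    int main(void)
    {
        char buf[8] = "";         /* empty string */
        char (*field)[8] = &buf;  /* pointer to the whole array */

        /* Wrong: field[0] is the array itself; as a condition it
         * decays to a non-null pointer and is always true (most
         * compilers even warn about this). */
        if (field[0])
            puts("always taken");

        /* Right: dereference first, then test the first byte. */
        if ((*field)[0])
            puts("never taken for an empty string");
        return 0;
    }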
diff --git a/security/keys/compat.c b/security/keys/compat.c
index c8783b3b628c..36c80bf5b89c 100644
--- a/security/keys/compat.c
+++ b/security/keys/compat.c
@@ -134,7 +134,7 @@ COMPAT_SYSCALL_DEFINE5(keyctl, u32, option,
134 134
135 case KEYCTL_DH_COMPUTE: 135 case KEYCTL_DH_COMPUTE:
136 return keyctl_dh_compute(compat_ptr(arg2), compat_ptr(arg3), 136 return keyctl_dh_compute(compat_ptr(arg2), compat_ptr(arg3),
137 arg4); 137 arg4, compat_ptr(arg5));
138 138
139 default: 139 default:
140 return -EOPNOTSUPP; 140 return -EOPNOTSUPP;
diff --git a/security/keys/dh.c b/security/keys/dh.c
index 880505a4b9f1..531ed2ec132f 100644
--- a/security/keys/dh.c
+++ b/security/keys/dh.c
@@ -78,7 +78,8 @@ error:
78} 78}
79 79
80long keyctl_dh_compute(struct keyctl_dh_params __user *params, 80long keyctl_dh_compute(struct keyctl_dh_params __user *params,
81 char __user *buffer, size_t buflen) 81 char __user *buffer, size_t buflen,
82 void __user *reserved)
82{ 83{
83 long ret; 84 long ret;
84 MPI base, private, prime, result; 85 MPI base, private, prime, result;
@@ -97,6 +98,11 @@ long keyctl_dh_compute(struct keyctl_dh_params __user *params,
97 goto out; 98 goto out;
98 } 99 }
99 100
101 if (reserved) {
102 ret = -EINVAL;
103 goto out;
104 }
105
100 keylen = mpi_from_key(pcopy.prime, buflen, &prime); 106 keylen = mpi_from_key(pcopy.prime, buflen, &prime);
101 if (keylen < 0 || !prime) { 107 if (keylen < 0 || !prime) {
102 /* buflen == 0 may be used to query the required buffer size, 108 /* buflen == 0 may be used to query the required buffer size,
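The dh.c change follows the kernel's usual pattern for growing a syscall: the new trailing argument arrives as "reserved", and the handler rejects anything but NULL so the slot can be given a meaning later without breaking existing callers. A hedged sketch of that pattern, with hypothetical names:

    #include <errno.h>
    #include <stddef.h>

    /* The 'reserved' argument must be NULL today so a future ABI
     * revision can assign it meaning; reject garbage early. */
    static long do_compute(const void *params, char *buffer,
                           size_t buflen, void *reserved)
    {
        if (reserved)
            return -EINVAL;
        /* ... real work would go here ... */
        (void)params; (void)buffer; (void)buflen;
        return 0;
    }

    int main(void)
    {
        char buf[16];
        return do_compute(NULL, buf, sizeof(buf), NULL) ? 1 : 0;
    }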
diff --git a/security/keys/internal.h b/security/keys/internal.h
index 8ec7a528365d..a705a7d92ad7 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -260,10 +260,11 @@ static inline long keyctl_get_persistent(uid_t uid, key_serial_t destring)
260 260
261#ifdef CONFIG_KEY_DH_OPERATIONS 261#ifdef CONFIG_KEY_DH_OPERATIONS
262extern long keyctl_dh_compute(struct keyctl_dh_params __user *, char __user *, 262extern long keyctl_dh_compute(struct keyctl_dh_params __user *, char __user *,
263 size_t); 263 size_t, void __user *);
264#else 264#else
265static inline long keyctl_dh_compute(struct keyctl_dh_params __user *params, 265static inline long keyctl_dh_compute(struct keyctl_dh_params __user *params,
266 char __user *buffer, size_t buflen) 266 char __user *buffer, size_t buflen,
267 void __user *reserved)
267{ 268{
268 return -EOPNOTSUPP; 269 return -EOPNOTSUPP;
269} 270}
diff --git a/security/keys/key.c b/security/keys/key.c
index bd5a272f28a6..346fbf201c22 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -597,7 +597,7 @@ int key_reject_and_link(struct key *key,
597 597
598 mutex_unlock(&key_construction_mutex); 598 mutex_unlock(&key_construction_mutex);
599 599
600 if (keyring) 600 if (keyring && link_ret == 0)
601 __key_link_end(keyring, &key->index_key, edit); 601 __key_link_end(keyring, &key->index_key, edit);
602 602
603 /* wake up anyone waiting for a key to be constructed */ 603 /* wake up anyone waiting for a key to be constructed */
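In key.c, __key_link_end() now runs only when the earlier __key_link_begin() actually succeeded (link_ret == 0); ending a link that never began unbalances the keyring's locking. The shape of the fix, reduced to a sketch with stub helpers standing in for the begin/end pair:

    #include <stdbool.h>

    /* 'end' may only run if 'begin' returned success, otherwise
     * the lock it would drop was never taken. */
    static int  link_begin(void) { return 0; /* may also fail */ }
    static void link_end(void)   { }

    static int reject_and_link(bool have_keyring)
    {
        int link_ret = -1;

        if (have_keyring)
            link_ret = link_begin();

        /* ... mark the key negatively instantiated ... */

        if (have_keyring && link_ret == 0)  /* balanced teardown */
            link_end();
        return link_ret;
    }

    int main(void) { return reject_and_link(true) ? 1 : 0; }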
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 3b135a0af344..d580ad06b792 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -1688,8 +1688,8 @@ SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3,
1688 1688
1689 case KEYCTL_DH_COMPUTE: 1689 case KEYCTL_DH_COMPUTE:
1690 return keyctl_dh_compute((struct keyctl_dh_params __user *) arg2, 1690 return keyctl_dh_compute((struct keyctl_dh_params __user *) arg2,
1691 (char __user *) arg3, 1691 (char __user *) arg3, (size_t) arg4,
1692 (size_t) arg4); 1692 (void __user *) arg5);
1693 1693
1694 default: 1694 default:
1695 return -EOPNOTSUPP; 1695 return -EOPNOTSUPP;
diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
index c0f8f613f1f1..172dacd925f5 100644
--- a/sound/drivers/dummy.c
+++ b/sound/drivers/dummy.c
@@ -420,6 +420,7 @@ static int dummy_hrtimer_stop(struct snd_pcm_substream *substream)
420 420
421static inline void dummy_hrtimer_sync(struct dummy_hrtimer_pcm *dpcm) 421static inline void dummy_hrtimer_sync(struct dummy_hrtimer_pcm *dpcm)
422{ 422{
423 hrtimer_cancel(&dpcm->timer);
423 tasklet_kill(&dpcm->tasklet); 424 tasklet_kill(&dpcm->tasklet);
424} 425}
425 426
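The dummy.c one-liner is about teardown ordering: the hrtimer is the producer that schedules the tasklet, so it must be cancelled first; otherwise a timer firing between tasklet_kill() and return can re-queue work on a structure about to go away. In outline, with stubs standing in for the two kernel calls:

    /* Stop the producer before the consumer: cancel the periodic
     * timer first so nothing can re-queue the deferred work, then
     * wait the work item out. */
    static void timer_cancel(void) { /* hrtimer_cancel() stand-in */ }
    static void work_kill(void)    { /* tasklet_kill() stand-in   */ }

    static void pcm_sync(void)
    {
        timer_cancel(); /* no new work can be scheduled past here */
        work_kill();    /* safe: the producer is already quiescent */
    }

    int main(void) { pcm_sync(); return 0; }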
diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c
index 87041ddd29cb..47a358fab132 100644
--- a/sound/hda/hdac_regmap.c
+++ b/sound/hda/hdac_regmap.c
@@ -444,7 +444,7 @@ int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg,
444 err = reg_raw_write(codec, reg, val); 444 err = reg_raw_write(codec, reg, val);
445 if (err == -EAGAIN) { 445 if (err == -EAGAIN) {
446 err = snd_hdac_power_up_pm(codec); 446 err = snd_hdac_power_up_pm(codec);
447 if (!err) 447 if (err >= 0)
448 err = reg_raw_write(codec, reg, val); 448 err = reg_raw_write(codec, reg, val);
449 snd_hdac_power_down_pm(codec); 449 snd_hdac_power_down_pm(codec);
450 } 450 }
@@ -470,7 +470,7 @@ static int __snd_hdac_regmap_read_raw(struct hdac_device *codec,
470 err = reg_raw_read(codec, reg, val, uncached); 470 err = reg_raw_read(codec, reg, val, uncached);
471 if (err == -EAGAIN) { 471 if (err == -EAGAIN) {
472 err = snd_hdac_power_up_pm(codec); 472 err = snd_hdac_power_up_pm(codec);
473 if (!err) 473 if (err >= 0)
474 err = reg_raw_read(codec, reg, val, uncached); 474 err = reg_raw_read(codec, reg, val, uncached);
475 snd_hdac_power_down_pm(codec); 475 snd_hdac_power_down_pm(codec);
476 } 476 }
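The hdac_regmap change accounts for snd_hdac_power_up_pm() following the pm_runtime_get_sync() convention, where zero and positive returns both mean success; `if (!err)` would misread a positive return as failure and skip the retry. A sketch of the retry shape under that assumed convention:

    #include <errno.h>

    /* power_up() is assumed to return >= 0 on success (for example
     * a usage count) and a negative error code on failure. */
    static int power_up(void)  { return 1; }
    static int raw_write(void) { return 0; }

    static int write_with_retry(void)
    {
        int err = raw_write();

        if (err == -EAGAIN) {
            err = power_up();
            if (err >= 0)   /* if (!err) would treat 1 as failure */
                err = raw_write();
        }
        return err;
    }

    int main(void) { return write_with_retry(); }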
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 9a0d1445ca5c..94089fc71884 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -365,8 +365,11 @@ enum {
365 365
366#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170) 366#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
367#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70) 367#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
368#define IS_KBL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa171)
369#define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71)
368#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98) 370#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
369#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci)) 371#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci)) || \
372 IS_KBL(pci) || IS_KBL_LP(pci)
370 373
371static char *driver_short_names[] = { 374static char *driver_short_names[] = {
372 [AZX_DRIVER_ICH] = "HDA Intel", 375 [AZX_DRIVER_ICH] = "HDA Intel",
@@ -2181,6 +2184,12 @@ static const struct pci_device_id azx_ids[] = {
2181 /* Sunrise Point-LP */ 2184 /* Sunrise Point-LP */
2182 { PCI_DEVICE(0x8086, 0x9d70), 2185 { PCI_DEVICE(0x8086, 0x9d70),
2183 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, 2186 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
2187 /* Kabylake */
2188 { PCI_DEVICE(0x8086, 0xa171),
2189 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
2190 /* Kabylake-LP */
2191 { PCI_DEVICE(0x8086, 0x9d71),
2192 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
2184 /* Broxton-P(Apollolake) */ 2193 /* Broxton-P(Apollolake) */
2185 { PCI_DEVICE(0x8086, 0x5a98), 2194 { PCI_DEVICE(0x8086, 0x5a98),
2186 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON }, 2195 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
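One hazard worth flagging in the first hda_intel.c hunk: the continuation places the new IS_KBL()/IS_KBL_LP() tests outside the closing parenthesis of IS_SKL_PLUS(), so under negation or next to && the operator binds only to the first parenthesised group. A generic demonstration of why macro bodies should be fully parenthesised:

    #include <stdio.h>

    /* The trailing "|| B" in BAD escapes the parentheses, so
     * negation binds to (A) alone. */
    #define A 0
    #define B 1
    #define BAD   (A) || B
    #define GOOD  (A || B)

    int main(void)
    {
        printf("!BAD=%d !GOOD=%d\n", !BAD, !GOOD); /* prints 1 0 */
        return 0;
    }

Applied to the hunk, the safer form keeps every IS_*() test inside one pair of parentheses in the IS_SKL_PLUS() body.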
diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
index 17fd81736d3d..0621920f7617 100644
--- a/sound/pci/hda/hda_tegra.c
+++ b/sound/pci/hda/hda_tegra.c
@@ -115,20 +115,20 @@ static int substream_free_pages(struct azx *chip,
115/* 115/*
116 * Register access ops. Tegra HDA register access is DWORD only. 116 * Register access ops. Tegra HDA register access is DWORD only.
117 */ 117 */
118static void hda_tegra_writel(u32 value, u32 *addr) 118static void hda_tegra_writel(u32 value, u32 __iomem *addr)
119{ 119{
120 writel(value, addr); 120 writel(value, addr);
121} 121}
122 122
123static u32 hda_tegra_readl(u32 *addr) 123static u32 hda_tegra_readl(u32 __iomem *addr)
124{ 124{
125 return readl(addr); 125 return readl(addr);
126} 126}
127 127
128static void hda_tegra_writew(u16 value, u16 *addr) 128static void hda_tegra_writew(u16 value, u16 __iomem *addr)
129{ 129{
130 unsigned int shift = ((unsigned long)(addr) & 0x3) << 3; 130 unsigned int shift = ((unsigned long)(addr) & 0x3) << 3;
131 void *dword_addr = (void *)((unsigned long)(addr) & ~0x3); 131 void __iomem *dword_addr = (void __iomem *)((unsigned long)(addr) & ~0x3);
132 u32 v; 132 u32 v;
133 133
134 v = readl(dword_addr); 134 v = readl(dword_addr);
@@ -137,20 +137,20 @@ static void hda_tegra_writew(u16 value, u16 *addr)
137 writel(v, dword_addr); 137 writel(v, dword_addr);
138} 138}
139 139
140static u16 hda_tegra_readw(u16 *addr) 140static u16 hda_tegra_readw(u16 __iomem *addr)
141{ 141{
142 unsigned int shift = ((unsigned long)(addr) & 0x3) << 3; 142 unsigned int shift = ((unsigned long)(addr) & 0x3) << 3;
143 void *dword_addr = (void *)((unsigned long)(addr) & ~0x3); 143 void __iomem *dword_addr = (void __iomem *)((unsigned long)(addr) & ~0x3);
144 u32 v; 144 u32 v;
145 145
146 v = readl(dword_addr); 146 v = readl(dword_addr);
147 return (v >> shift) & 0xffff; 147 return (v >> shift) & 0xffff;
148} 148}
149 149
150static void hda_tegra_writeb(u8 value, u8 *addr) 150static void hda_tegra_writeb(u8 value, u8 __iomem *addr)
151{ 151{
152 unsigned int shift = ((unsigned long)(addr) & 0x3) << 3; 152 unsigned int shift = ((unsigned long)(addr) & 0x3) << 3;
153 void *dword_addr = (void *)((unsigned long)(addr) & ~0x3); 153 void __iomem *dword_addr = (void __iomem *)((unsigned long)(addr) & ~0x3);
154 u32 v; 154 u32 v;
155 155
156 v = readl(dword_addr); 156 v = readl(dword_addr);
@@ -159,10 +159,10 @@ static void hda_tegra_writeb(u8 value, u8 *addr)
159 writel(v, dword_addr); 159 writel(v, dword_addr);
160} 160}
161 161
162static u8 hda_tegra_readb(u8 *addr) 162static u8 hda_tegra_readb(u8 __iomem *addr)
163{ 163{
164 unsigned int shift = ((unsigned long)(addr) & 0x3) << 3; 164 unsigned int shift = ((unsigned long)(addr) & 0x3) << 3;
165 void *dword_addr = (void *)((unsigned long)(addr) & ~0x3); 165 void __iomem *dword_addr = (void __iomem *)((unsigned long)(addr) & ~0x3);
166 u32 v; 166 u32 v;
167 167
168 v = readl(dword_addr); 168 v = readl(dword_addr);
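The hda_tegra churn is sparse hygiene: MMIO pointers gain the __iomem address-space annotation so static analysis flags plain dereferences or mixing with ordinary pointers, and only accessors such as readl()/writel() may touch them. Outside the kernel the annotation is just an attribute, which a user-space sketch can stub out:

    /* In the kernel, __iomem expands to a sparse address_space
     * attribute; user space can define it to nothing. */
    #define __iomem

    static unsigned int my_readl(const volatile unsigned int __iomem *addr)
    {
        /* stand-in for readl(): force a volatile access */
        return *(const volatile unsigned int *)addr;
    }

    int main(void)
    {
        unsigned int reg = 0xdeadbeef;  /* pretend MMIO word */
        return my_readl(&reg) == 0xdeadbeef ? 0 : 1;
    }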
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index d53c25e7a1c1..900bfbc3368c 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -346,6 +346,9 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
346 case 0x10ec0234: 346 case 0x10ec0234:
347 case 0x10ec0274: 347 case 0x10ec0274:
348 case 0x10ec0294: 348 case 0x10ec0294:
349 case 0x10ec0700:
350 case 0x10ec0701:
351 case 0x10ec0703:
349 alc_update_coef_idx(codec, 0x10, 1<<15, 0); 352 alc_update_coef_idx(codec, 0x10, 1<<15, 0);
350 break; 353 break;
351 case 0x10ec0662: 354 case 0x10ec0662:
@@ -2655,6 +2658,7 @@ enum {
2655 ALC269_TYPE_ALC256, 2658 ALC269_TYPE_ALC256,
2656 ALC269_TYPE_ALC225, 2659 ALC269_TYPE_ALC225,
2657 ALC269_TYPE_ALC294, 2660 ALC269_TYPE_ALC294,
2661 ALC269_TYPE_ALC700,
2658}; 2662};
2659 2663
2660/* 2664/*
@@ -2686,6 +2690,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
2686 case ALC269_TYPE_ALC256: 2690 case ALC269_TYPE_ALC256:
2687 case ALC269_TYPE_ALC225: 2691 case ALC269_TYPE_ALC225:
2688 case ALC269_TYPE_ALC294: 2692 case ALC269_TYPE_ALC294:
2693 case ALC269_TYPE_ALC700:
2689 ssids = alc269_ssids; 2694 ssids = alc269_ssids;
2690 break; 2695 break;
2691 default: 2696 default:
@@ -3618,13 +3623,20 @@ static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec,
3618static void alc_headset_mode_unplugged(struct hda_codec *codec) 3623static void alc_headset_mode_unplugged(struct hda_codec *codec)
3619{ 3624{
3620 static struct coef_fw coef0255[] = { 3625 static struct coef_fw coef0255[] = {
3621 WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
3622 WRITE_COEF(0x45, 0xd089), /* UAJ function set to manual mode */ 3626 WRITE_COEF(0x45, 0xd089), /* UAJ function set to manual mode */
3623 UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/ 3627 UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/
3624 WRITE_COEF(0x06, 0x6104), /* Set MIC2 Vref gate with HP */ 3628 WRITE_COEF(0x06, 0x6104), /* Set MIC2 Vref gate with HP */
3625 WRITE_COEFEX(0x57, 0x03, 0x8aa6), /* Direct Drive HP Amp control */ 3629 WRITE_COEFEX(0x57, 0x03, 0x8aa6), /* Direct Drive HP Amp control */
3626 {} 3630 {}
3627 }; 3631 };
3632 static struct coef_fw coef0255_1[] = {
3633 WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
3634 {}
3635 };
3636 static struct coef_fw coef0256[] = {
3637 WRITE_COEF(0x1b, 0x0c4b), /* LDO and MISC control */
3638 {}
3639 };
3628 static struct coef_fw coef0233[] = { 3640 static struct coef_fw coef0233[] = {
3629 WRITE_COEF(0x1b, 0x0c0b), 3641 WRITE_COEF(0x1b, 0x0c0b),
3630 WRITE_COEF(0x45, 0xc429), 3642 WRITE_COEF(0x45, 0xc429),
@@ -3677,7 +3689,11 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
3677 3689
3678 switch (codec->core.vendor_id) { 3690 switch (codec->core.vendor_id) {
3679 case 0x10ec0255: 3691 case 0x10ec0255:
3692 alc_process_coef_fw(codec, coef0255_1);
3693 alc_process_coef_fw(codec, coef0255);
3694 break;
3680 case 0x10ec0256: 3695 case 0x10ec0256:
3696 alc_process_coef_fw(codec, coef0256);
3681 alc_process_coef_fw(codec, coef0255); 3697 alc_process_coef_fw(codec, coef0255);
3682 break; 3698 break;
3683 case 0x10ec0233: 3699 case 0x10ec0233:
@@ -3896,6 +3912,12 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
3896 WRITE_COEFEX(0x57, 0x03, 0x8ea6), 3912 WRITE_COEFEX(0x57, 0x03, 0x8ea6),
3897 {} 3913 {}
3898 }; 3914 };
3915 static struct coef_fw coef0256[] = {
3916 WRITE_COEF(0x45, 0xd489), /* Set to CTIA type */
3917 WRITE_COEF(0x1b, 0x0c6b),
3918 WRITE_COEFEX(0x57, 0x03, 0x8ea6),
3919 {}
3920 };
3899 static struct coef_fw coef0233[] = { 3921 static struct coef_fw coef0233[] = {
3900 WRITE_COEF(0x45, 0xd429), 3922 WRITE_COEF(0x45, 0xd429),
3901 WRITE_COEF(0x1b, 0x0c2b), 3923 WRITE_COEF(0x1b, 0x0c2b),
@@ -3936,9 +3958,11 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
3936 3958
3937 switch (codec->core.vendor_id) { 3959 switch (codec->core.vendor_id) {
3938 case 0x10ec0255: 3960 case 0x10ec0255:
3939 case 0x10ec0256:
3940 alc_process_coef_fw(codec, coef0255); 3961 alc_process_coef_fw(codec, coef0255);
3941 break; 3962 break;
3963 case 0x10ec0256:
3964 alc_process_coef_fw(codec, coef0256);
3965 break;
3942 case 0x10ec0233: 3966 case 0x10ec0233:
3943 case 0x10ec0283: 3967 case 0x10ec0283:
3944 alc_process_coef_fw(codec, coef0233); 3968 alc_process_coef_fw(codec, coef0233);
@@ -3978,6 +4002,12 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
3978 WRITE_COEFEX(0x57, 0x03, 0x8ea6), 4002 WRITE_COEFEX(0x57, 0x03, 0x8ea6),
3979 {} 4003 {}
3980 }; 4004 };
4005 static struct coef_fw coef0256[] = {
4006 WRITE_COEF(0x45, 0xe489), /* Set to OMTP Type */
4007 WRITE_COEF(0x1b, 0x0c6b),
4008 WRITE_COEFEX(0x57, 0x03, 0x8ea6),
4009 {}
4010 };
3981 static struct coef_fw coef0233[] = { 4011 static struct coef_fw coef0233[] = {
3982 WRITE_COEF(0x45, 0xe429), 4012 WRITE_COEF(0x45, 0xe429),
3983 WRITE_COEF(0x1b, 0x0c2b), 4013 WRITE_COEF(0x1b, 0x0c2b),
@@ -4018,9 +4048,11 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
4018 4048
4019 switch (codec->core.vendor_id) { 4049 switch (codec->core.vendor_id) {
4020 case 0x10ec0255: 4050 case 0x10ec0255:
4021 case 0x10ec0256:
4022 alc_process_coef_fw(codec, coef0255); 4051 alc_process_coef_fw(codec, coef0255);
4023 break; 4052 break;
4053 case 0x10ec0256:
4054 alc_process_coef_fw(codec, coef0256);
4055 break;
4024 case 0x10ec0233: 4056 case 0x10ec0233:
4025 case 0x10ec0283: 4057 case 0x10ec0283:
4026 alc_process_coef_fw(codec, coef0233); 4058 alc_process_coef_fw(codec, coef0233);
@@ -4266,7 +4298,7 @@ static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec,
4266static void alc255_set_default_jack_type(struct hda_codec *codec) 4298static void alc255_set_default_jack_type(struct hda_codec *codec)
4267{ 4299{
4268 /* Set to iphone type */ 4300 /* Set to iphone type */
4269 static struct coef_fw fw[] = { 4301 static struct coef_fw alc255fw[] = {
4270 WRITE_COEF(0x1b, 0x880b), 4302 WRITE_COEF(0x1b, 0x880b),
4271 WRITE_COEF(0x45, 0xd089), 4303 WRITE_COEF(0x45, 0xd089),
4272 WRITE_COEF(0x1b, 0x080b), 4304 WRITE_COEF(0x1b, 0x080b),
@@ -4274,7 +4306,22 @@ static void alc255_set_default_jack_type(struct hda_codec *codec)
4274 WRITE_COEF(0x1b, 0x0c0b), 4306 WRITE_COEF(0x1b, 0x0c0b),
4275 {} 4307 {}
4276 }; 4308 };
4277 alc_process_coef_fw(codec, fw); 4309 static struct coef_fw alc256fw[] = {
4310 WRITE_COEF(0x1b, 0x884b),
4311 WRITE_COEF(0x45, 0xd089),
4312 WRITE_COEF(0x1b, 0x084b),
4313 WRITE_COEF(0x46, 0x0004),
4314 WRITE_COEF(0x1b, 0x0c4b),
4315 {}
4316 };
4317 switch (codec->core.vendor_id) {
4318 case 0x10ec0255:
4319 alc_process_coef_fw(codec, alc255fw);
4320 break;
4321 case 0x10ec0256:
4322 alc_process_coef_fw(codec, alc256fw);
4323 break;
4324 }
4278 msleep(30); 4325 msleep(30);
4279} 4326}
4280 4327
@@ -5587,6 +5634,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5587 SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK), 5634 SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK),
5588 SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK), 5635 SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
5589 SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK), 5636 SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
5637 SND_PCI_QUIRK(0x17aa, 0x2231, "Thinkpad T560", ALC292_FIXUP_TPT460),
5590 SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460), 5638 SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460),
5591 SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), 5639 SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
5592 SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), 5640 SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
@@ -5602,6 +5650,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5602 SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK), 5650 SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
5603 SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK), 5651 SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
5604 SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE), 5652 SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
5653 SND_PCI_QUIRK(0x17aa, 0x5050, "Thinkpad T560p", ALC292_FIXUP_TPT460),
5654 SND_PCI_QUIRK(0x17aa, 0x5053, "Thinkpad T460", ALC292_FIXUP_TPT460),
5605 SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5655 SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5606 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), 5656 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
5607 SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), 5657 SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
@@ -5775,11 +5825,19 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
5775 {0x12, 0x90a60180}, 5825 {0x12, 0x90a60180},
5776 {0x14, 0x90170130}, 5826 {0x14, 0x90170130},
5777 {0x21, 0x02211040}), 5827 {0x21, 0x02211040}),
5828 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell Inspiron 5565", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5829 {0x12, 0x90a60180},
5830 {0x14, 0x90170120},
5831 {0x21, 0x02211030}),
5778 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 5832 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5779 {0x12, 0x90a60160}, 5833 {0x12, 0x90a60160},
5780 {0x14, 0x90170120}, 5834 {0x14, 0x90170120},
5781 {0x21, 0x02211030}), 5835 {0x21, 0x02211030}),
5782 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 5836 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5837 {0x12, 0x90a60170},
5838 {0x14, 0x90170120},
5839 {0x21, 0x02211030}),
5840 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5783 ALC256_STANDARD_PINS), 5841 ALC256_STANDARD_PINS),
5784 SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4, 5842 SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
5785 {0x12, 0x90a60130}, 5843 {0x12, 0x90a60130},
@@ -6053,6 +6111,14 @@ static int patch_alc269(struct hda_codec *codec)
6053 case 0x10ec0294: 6111 case 0x10ec0294:
6054 spec->codec_variant = ALC269_TYPE_ALC294; 6112 spec->codec_variant = ALC269_TYPE_ALC294;
6055 break; 6113 break;
6114 case 0x10ec0700:
6115 case 0x10ec0701:
6116 case 0x10ec0703:
6117 spec->codec_variant = ALC269_TYPE_ALC700;
6118 spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
6119 alc_update_coef_idx(codec, 0x4a, 0, 1 << 15); /* Combo jack auto trigger control */
6120 break;
6121
6056 } 6122 }
6057 6123
6058 if (snd_hda_codec_read(codec, 0x51, 0, AC_VERB_PARAMETERS, 0) == 0x10ec5505) { 6124 if (snd_hda_codec_read(codec, 0x51, 0, AC_VERB_PARAMETERS, 0) == 0x10ec5505) {
@@ -7008,6 +7074,9 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
7008 HDA_CODEC_ENTRY(0x10ec0670, "ALC670", patch_alc662), 7074 HDA_CODEC_ENTRY(0x10ec0670, "ALC670", patch_alc662),
7009 HDA_CODEC_ENTRY(0x10ec0671, "ALC671", patch_alc662), 7075 HDA_CODEC_ENTRY(0x10ec0671, "ALC671", patch_alc662),
7010 HDA_CODEC_ENTRY(0x10ec0680, "ALC680", patch_alc680), 7076 HDA_CODEC_ENTRY(0x10ec0680, "ALC680", patch_alc680),
7077 HDA_CODEC_ENTRY(0x10ec0700, "ALC700", patch_alc269),
7078 HDA_CODEC_ENTRY(0x10ec0701, "ALC701", patch_alc269),
7079 HDA_CODEC_ENTRY(0x10ec0703, "ALC703", patch_alc269),
7011 HDA_CODEC_ENTRY(0x10ec0867, "ALC891", patch_alc882), 7080 HDA_CODEC_ENTRY(0x10ec0867, "ALC891", patch_alc882),
7012 HDA_CODEC_ENTRY(0x10ec0880, "ALC880", patch_alc880), 7081 HDA_CODEC_ENTRY(0x10ec0880, "ALC880", patch_alc880),
7013 HDA_CODEC_ENTRY(0x10ec0882, "ALC882", patch_alc882), 7082 HDA_CODEC_ENTRY(0x10ec0882, "ALC882", patch_alc882),
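A pattern recurs through the patch_realtek.c hunks: the ALC256 used to fall through to the ALC255 coefficient tables, and once its LDO setup diverged it got its own table and its own case label rather than a branch inside the shared one. A compact sketch of that shape; the tables here are illustrative stand-ins, not the driver's:

    struct coef { int idx, val; };

    static void process(const struct coef *fw) { (void)fw; }

    /* Hypothetical per-variant tables mirroring the hunk's shape. */
    static const struct coef coef0255[] = { {0x1b, 0x0c0b}, {0, 0} };
    static const struct coef coef0256[] = { {0x1b, 0x0c4b}, {0, 0} };

    static void setup(unsigned int vendor_id)
    {
        switch (vendor_id) {
        case 0x10ec0255:
            process(coef0255);
            break;
        case 0x10ec0256:    /* used to fall through to coef0255 */
            process(coef0256);
            break;
        }
    }

    int main(void) { setup(0x10ec0256); return 0; }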
diff --git a/sound/soc/omap/omap-hdmi-audio.c b/sound/soc/omap/omap-hdmi-audio.c
index 64425d352962..888133f9e65d 100644
--- a/sound/soc/omap/omap-hdmi-audio.c
+++ b/sound/soc/omap/omap-hdmi-audio.c
@@ -28,7 +28,6 @@
28#include <sound/asoundef.h> 28#include <sound/asoundef.h>
29#include <sound/omap-pcm.h> 29#include <sound/omap-pcm.h>
30#include <sound/omap-hdmi-audio.h> 30#include <sound/omap-hdmi-audio.h>
31#include <video/omapdss.h>
32 31
33#define DRV_NAME "omap-hdmi-audio" 32#define DRV_NAME "omap-hdmi-audio"
34 33
diff --git a/tools/perf/util/data-convert-bt.c b/tools/perf/util/data-convert-bt.c
index bbf69d248ec5..9f53020c3269 100644
--- a/tools/perf/util/data-convert-bt.c
+++ b/tools/perf/util/data-convert-bt.c
@@ -204,6 +204,44 @@ static unsigned long long adjust_signedness(unsigned long long value_int, int si
204 return (value_int & value_mask) | ~value_mask; 204 return (value_int & value_mask) | ~value_mask;
205} 205}
206 206
207static int string_set_value(struct bt_ctf_field *field, const char *string)
208{
209 char *buffer = NULL;
210 size_t len = strlen(string), i, p;
211 int err;
212
213 for (i = p = 0; i < len; i++, p++) {
214 if (isprint(string[i])) {
215 if (!buffer)
216 continue;
217 buffer[p] = string[i];
218 } else {
219 char numstr[5];
220
221 snprintf(numstr, sizeof(numstr), "\\x%02x",
222 (unsigned int)(string[i]) & 0xff);
223
224 if (!buffer) {
225 buffer = zalloc(i + (len - i) * 4 + 2);
226 if (!buffer) {
227 pr_err("failed to set unprintable string '%s'\n", string);
228 return bt_ctf_field_string_set_value(field, "UNPRINTABLE-STRING");
229 }
230 if (i > 0)
231 strncpy(buffer, string, i);
232 }
233 strncat(buffer + p, numstr, 4);
234 p += 3;
235 }
236 }
237
238 if (!buffer)
239 return bt_ctf_field_string_set_value(field, string);
240 err = bt_ctf_field_string_set_value(field, buffer);
241 free(buffer);
242 return err;
243}
244
207static int add_tracepoint_field_value(struct ctf_writer *cw, 245static int add_tracepoint_field_value(struct ctf_writer *cw,
208 struct bt_ctf_event_class *event_class, 246 struct bt_ctf_event_class *event_class,
209 struct bt_ctf_event *event, 247 struct bt_ctf_event *event,
@@ -270,8 +308,7 @@ static int add_tracepoint_field_value(struct ctf_writer *cw,
270 } 308 }
271 309
272 if (flags & FIELD_IS_STRING) 310 if (flags & FIELD_IS_STRING)
273 ret = bt_ctf_field_string_set_value(field, 311 ret = string_set_value(field, data + offset + i * len);
274 data + offset + i * len);
275 else { 312 else {
276 unsigned long long value_int; 313 unsigned long long value_int;
277 314
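string_set_value() above works around CTF string fields rejecting unprintable bytes by expanding them to \xNN escapes, allocating a worst-case buffer only once a bad byte is found. A compacted, self-contained variant of the same idea (not the perf code itself; this version always allocates, sized for every byte expanding to four characters):

    #include <ctype.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Copy printable bytes through and expand anything else to a
     * 4-char "\xNN" sequence; len * 4 + 1 always fits. */
    static char *escape_unprintable(const char *s)
    {
        size_t len = strlen(s), i, p = 0;
        char *out = malloc(len * 4 + 1);

        if (!out)
            return NULL;
        for (i = 0; i < len; i++) {
            if (isprint((unsigned char)s[i]))
                out[p++] = s[i];
            else
                p += sprintf(out + p, "\\x%02x",
                             (unsigned char)s[i]);
        }
        out[p] = '\0';
        return out;
    }

    int main(void)
    {
        char *e = escape_unprintable("ok\tthen\x01");
        puts(e ? e : "alloc failed");  /* prints: ok\x09then\x01 */
        free(e);
        return 0;
    }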
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index f6fcc6832949..9b141f12329e 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -673,6 +673,8 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
673 int err; 673 int err;
674 union perf_event *event; 674 union perf_event *event;
675 675
676 if (symbol_conf.kptr_restrict)
677 return -1;
676 if (map == NULL) 678 if (map == NULL)
677 return -1; 679 return -1;
678 680
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 20f9cb32b703..54c4ff2b1cee 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1933,17 +1933,17 @@ int setup_intlist(struct intlist **list, const char *list_str,
1933static bool symbol__read_kptr_restrict(void) 1933static bool symbol__read_kptr_restrict(void)
1934{ 1934{
1935 bool value = false; 1935 bool value = false;
1936 FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
1936 1937
1937 if (geteuid() != 0) { 1938 if (fp != NULL) {
1938 FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r"); 1939 char line[8];
1939 if (fp != NULL) {
1940 char line[8];
1941 1940
1942 if (fgets(line, sizeof(line), fp) != NULL) 1941 if (fgets(line, sizeof(line), fp) != NULL)
1943 value = atoi(line) != 0; 1942 value = (geteuid() != 0) ?
1943 (atoi(line) != 0) :
1944 (atoi(line) == 2);
1944 1945
1945 fclose(fp); 1946 fclose(fp);
1946 }
1947 } 1947 }
1948 1948
1949 return value; 1949 return value;
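The symbol.c rewrite reads kptr_restrict unconditionally and then applies a two-row decision table: a non-root user is restricted by any non-zero value, root only by the value 2 (which hides kernel addresses even from root). The same check as a standalone helper:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    /* Same decision table as the hunk above. */
    static bool kptrs_restricted(void)
    {
        bool restricted = false;
        FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");

        if (fp) {
            char line[8];

            if (fgets(line, sizeof(line), fp))
                restricted = (geteuid() != 0)
                           ? atoi(line) != 0
                           : atoi(line) == 2;
            fclose(fp);
        }
        return restricted;
    }

    int main(void)
    {
        printf("kptr_restrict in effect: %d\n", kptrs_restricted());
        return 0;
    }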
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc
index c2b61c4fda11..0bf5085281f3 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc
@@ -23,15 +23,14 @@ if [ ! -f events/sched/sched_process_fork/trigger ]; then
23 exit_unsupported 23 exit_unsupported
24fi 24fi
25 25
26reset_tracer 26if [ ! -f events/sched/sched_process_fork/hist ]; then
27do_reset
28
29FEATURE=`grep hist events/sched/sched_process_fork/trigger`
30if [ -z "$FEATURE" ]; then
31 echo "hist trigger is not supported" 27 echo "hist trigger is not supported"
32 exit_unsupported 28 exit_unsupported
33fi 29fi
34 30
31reset_tracer
32do_reset
33
35echo "Test histogram with execname modifier" 34echo "Test histogram with execname modifier"
36 35
37echo 'hist:keys=common_pid.execname' > events/sched/sched_process_fork/trigger 36echo 'hist:keys=common_pid.execname' > events/sched/sched_process_fork/trigger
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc
index b2902d42a537..a00184cd9c95 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc
@@ -23,15 +23,14 @@ if [ ! -f events/sched/sched_process_fork/trigger ]; then
23 exit_unsupported 23 exit_unsupported
24fi 24fi
25 25
26reset_tracer 26if [ ! -f events/sched/sched_process_fork/hist ]; then
27do_reset
28
29FEATURE=`grep hist events/sched/sched_process_fork/trigger`
30if [ -z "$FEATURE" ]; then
31 echo "hist trigger is not supported" 27 echo "hist trigger is not supported"
32 exit_unsupported 28 exit_unsupported
33fi 29fi
34 30
31reset_tracer
32do_reset
33
35echo "Test histogram basic trigger" 34echo "Test histogram basic trigger"
36 35
37echo 'hist:keys=parent_pid:vals=child_pid' > events/sched/sched_process_fork/trigger 36echo 'hist:keys=parent_pid:vals=child_pid' > events/sched/sched_process_fork/trigger
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc
index 03c4a46561fc..3478b00ead57 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc
@@ -23,15 +23,14 @@ if [ ! -f events/sched/sched_process_fork/trigger ]; then
23 exit_unsupported 23 exit_unsupported
24fi 24fi
25 25
26reset_tracer 26if [ ! -f events/sched/sched_process_fork/hist ]; then
27do_reset
28
29FEATURE=`grep hist events/sched/sched_process_fork/trigger`
30if [ -z "$FEATURE" ]; then
31 echo "hist trigger is not supported" 27 echo "hist trigger is not supported"
32 exit_unsupported 28 exit_unsupported
33fi 29fi
34 30
31reset_tracer
32do_reset
33
35reset_trigger 34reset_trigger
36 35
37echo "Test histogram multiple triggers" 36echo "Test histogram multiple triggers"
diff --git a/tools/testing/selftests/net/reuseport_bpf.c b/tools/testing/selftests/net/reuseport_bpf.c
index 96ba386b1b7b..4a8217448f20 100644
--- a/tools/testing/selftests/net/reuseport_bpf.c
+++ b/tools/testing/selftests/net/reuseport_bpf.c
@@ -111,9 +111,9 @@ static void attach_ebpf(int fd, uint16_t mod)
111 memset(&attr, 0, sizeof(attr)); 111 memset(&attr, 0, sizeof(attr));
112 attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; 112 attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
113 attr.insn_cnt = ARRAY_SIZE(prog); 113 attr.insn_cnt = ARRAY_SIZE(prog);
114 attr.insns = (uint64_t)prog; 114 attr.insns = (unsigned long) &prog;
115 attr.license = (uint64_t)bpf_license; 115 attr.license = (unsigned long) &bpf_license;
116 attr.log_buf = (uint64_t)bpf_log_buf; 116 attr.log_buf = (unsigned long) &bpf_log_buf;
117 attr.log_size = sizeof(bpf_log_buf); 117 attr.log_size = sizeof(bpf_log_buf);
118 attr.log_level = 1; 118 attr.log_level = 1;
119 attr.kern_version = 0; 119 attr.kern_version = 0;
@@ -351,8 +351,8 @@ static void test_filter_no_reuseport(const struct test_params p)
351 memset(&eprog, 0, sizeof(eprog)); 351 memset(&eprog, 0, sizeof(eprog));
352 eprog.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; 352 eprog.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
353 eprog.insn_cnt = ARRAY_SIZE(ecode); 353 eprog.insn_cnt = ARRAY_SIZE(ecode);
354 eprog.insns = (uint64_t)ecode; 354 eprog.insns = (unsigned long) &ecode;
355 eprog.license = (uint64_t)bpf_license; 355 eprog.license = (unsigned long) &bpf_license;
356 eprog.kern_version = 0; 356 eprog.kern_version = 0;
357 357
358 memset(&cprog, 0, sizeof(cprog)); 358 memset(&cprog, 0, sizeof(cprog));
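The reuseport_bpf change swaps (uint64_t) pointer casts for (unsigned long): bpf attributes carry pointers in 64-bit fields, and on 32-bit targets casting a pointer directly to a wider integer type draws a compiler warning. Widening through the pointer-sized integer first is the usual idiom; a sketch with a stand-in struct:

    #include <stdint.h>

    struct fake_attr {   /* stand-in for the bpf attr's __u64 fields */
        uint64_t insns;
    };

    static void set_prog(struct fake_attr *a, const void *prog)
    {
        /* pointer -> pointer-sized integer -> u64: no truncation
         * warning on 32-bit, same value on 64-bit (uintptr_t is
         * equivalent to unsigned long on Linux targets) */
        a->insns = (uintptr_t)prog;
    }

    int main(void)
    {
        struct fake_attr a;
        int dummy;

        set_prog(&a, &dummy);
        return 0;
    }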
diff --git a/tools/testing/selftests/vm/compaction_test.c b/tools/testing/selftests/vm/compaction_test.c
index 932ff577ffc0..00c4f65d12da 100644
--- a/tools/testing/selftests/vm/compaction_test.c
+++ b/tools/testing/selftests/vm/compaction_test.c
@@ -136,7 +136,7 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
136 printf("No of huge pages allocated = %d\n", 136 printf("No of huge pages allocated = %d\n",
137 (atoi(nr_hugepages))); 137 (atoi(nr_hugepages)));
138 138
139 if (write(fd, initial_nr_hugepages, sizeof(initial_nr_hugepages)) 139 if (write(fd, initial_nr_hugepages, strlen(initial_nr_hugepages))
140 != strlen(initial_nr_hugepages)) { 140 != strlen(initial_nr_hugepages)) {
141 perror("Failed to write to /proc/sys/vm/nr_hugepages\n"); 141 perror("Failed to write to /proc/sys/vm/nr_hugepages\n");
142 goto close_fd; 142 goto close_fd;
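The compaction_test fix is the classic sizeof-versus-strlen slip: initial_nr_hugepages is a fixed-size array, so write(fd, buf, sizeof(buf)) pushes the array's unused tail into the sysctl file and can never equal strlen(buf) in the comparison. Reduced to its essence:

    #include <string.h>
    #include <unistd.h>

    /* Write exactly the string and compare against the same length;
     * sizeof(buf) would send trailing junk past the terminator. */
    static int write_string(int fd, const char *buf)
    {
        size_t len = strlen(buf);

        return write(fd, buf, len) == (ssize_t)len ? 0 : -1;
    }

    int main(void)
    {
        return write_string(1, "42\n");
    }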
diff --git a/tools/virtio/ringtest/Makefile b/tools/virtio/ringtest/Makefile
index 6ba745529833..6173adae9f08 100644
--- a/tools/virtio/ringtest/Makefile
+++ b/tools/virtio/ringtest/Makefile
@@ -1,6 +1,6 @@
1all: 1all:
2 2
3all: ring virtio_ring_0_9 virtio_ring_poll virtio_ring_inorder 3all: ring virtio_ring_0_9 virtio_ring_poll virtio_ring_inorder noring
4 4
5CFLAGS += -Wall 5CFLAGS += -Wall
6CFLAGS += -pthread -O2 -ggdb 6CFLAGS += -pthread -O2 -ggdb
@@ -15,11 +15,13 @@ ring: ring.o main.o
15virtio_ring_0_9: virtio_ring_0_9.o main.o 15virtio_ring_0_9: virtio_ring_0_9.o main.o
16virtio_ring_poll: virtio_ring_poll.o main.o 16virtio_ring_poll: virtio_ring_poll.o main.o
17virtio_ring_inorder: virtio_ring_inorder.o main.o 17virtio_ring_inorder: virtio_ring_inorder.o main.o
18noring: noring.o main.o
18clean: 19clean:
19 -rm main.o 20 -rm main.o
20 -rm ring.o ring 21 -rm ring.o ring
21 -rm virtio_ring_0_9.o virtio_ring_0_9 22 -rm virtio_ring_0_9.o virtio_ring_0_9
22 -rm virtio_ring_poll.o virtio_ring_poll 23 -rm virtio_ring_poll.o virtio_ring_poll
23 -rm virtio_ring_inorder.o virtio_ring_inorder 24 -rm virtio_ring_inorder.o virtio_ring_inorder
25 -rm noring.o noring
24 26
25.PHONY: all clean 27.PHONY: all clean
diff --git a/tools/virtio/ringtest/README b/tools/virtio/ringtest/README
index 34e94c46104f..d83707a336c9 100644
--- a/tools/virtio/ringtest/README
+++ b/tools/virtio/ringtest/README
@@ -1,2 +1,6 @@
1Partial implementation of various ring layouts, useful to tune virtio design. 1Partial implementation of various ring layouts, useful to tune virtio design.
2Uses shared memory heavily. 2Uses shared memory heavily.
3
4Typical use:
5
6# sh run-on-all.sh perf stat -r 10 --log-fd 1 -- ./ring
diff --git a/tools/virtio/ringtest/noring.c b/tools/virtio/ringtest/noring.c
new file mode 100644
index 000000000000..eda2f4824130
--- /dev/null
+++ b/tools/virtio/ringtest/noring.c
@@ -0,0 +1,69 @@
1#define _GNU_SOURCE
2#include "main.h"
3#include <assert.h>
4
5/* stub implementation: useful for measuring overhead */
6void alloc_ring(void)
7{
8}
9
10/* guest side */
11int add_inbuf(unsigned len, void *buf, void *datap)
12{
13 return 0;
14}
15
16/*
17 * skb_array API provides no way for producer to find out whether a given
18 * buffer was consumed. Our tests merely require that a successful get_buf
19 * implies that add_inbuf succeeded in the past, and that add_inbuf will
20 * succeed; fake it accordingly.
21 */
22void *get_buf(unsigned *lenp, void **bufp)
23{
24 return "Buffer";
25}
26
27void poll_used(void)
28{
29}
30
31void disable_call()
32{
33 assert(0);
34}
35
36bool enable_call()
37{
38 assert(0);
39}
40
41void kick_available(void)
42{
43 assert(0);
44}
45
46/* host side */
47void disable_kick()
48{
49 assert(0);
50}
51
52bool enable_kick()
53{
54 assert(0);
55}
56
57void poll_avail(void)
58{
59}
60
61bool use_buf(unsigned *lenp, void **bufp)
62{
63 return true;
64}
65
66void call_used(void)
67{
68 assert(0);
69}
diff --git a/tools/virtio/ringtest/run-on-all.sh b/tools/virtio/ringtest/run-on-all.sh
index 52b0f71ffa8d..2e69ca812b4c 100755
--- a/tools/virtio/ringtest/run-on-all.sh
+++ b/tools/virtio/ringtest/run-on-all.sh
@@ -3,10 +3,10 @@
3#use last CPU for host. Why not the first? 3#use last CPU for host. Why not the first?
4#many devices tend to use cpu0 by default so 4#many devices tend to use cpu0 by default so
5#it tends to be busier 5#it tends to be busier
6HOST_AFFINITY=$(cd /dev/cpu; ls|grep -v '[a-z]'|sort -n|tail -1) 6HOST_AFFINITY=$(lscpu -p=cpu | tail -1)
7 7
8#run command on all cpus 8#run command on all cpus
9for cpu in $(cd /dev/cpu; ls|grep -v '[a-z]'|sort -n); 9for cpu in $(seq 0 $HOST_AFFINITY)
10do 10do
11 #Don't run guest and host on same CPU 11 #Don't run guest and host on same CPU
12 #It actually works ok if using signalling 12 #It actually works ok if using signalling
diff --git a/tools/vm/slabinfo.c b/tools/vm/slabinfo.c
index 1889163f2f05..7cf6e1769903 100644
--- a/tools/vm/slabinfo.c
+++ b/tools/vm/slabinfo.c
@@ -492,7 +492,7 @@ static void slab_stats(struct slabinfo *s)
492 s->deactivate_to_head + s->deactivate_to_tail + s->deactivate_bypass; 492 s->deactivate_to_head + s->deactivate_to_tail + s->deactivate_bypass;
493 493
494 if (total) { 494 if (total) {
495 printf("\nSlab Deactivation Ocurrences %%\n"); 495 printf("\nSlab Deactivation Occurrences %%\n");
496 printf("-------------------------------------------------\n"); 496 printf("-------------------------------------------------\n");
497 printf("Slab full %7lu %3lu%%\n", 497 printf("Slab full %7lu %3lu%%\n",
498 s->deactivate_full, (s->deactivate_full * 100) / total); 498 s->deactivate_full, (s->deactivate_full * 100) / total);
diff --git a/virt/kvm/arm/hyp/vgic-v2-sr.c b/virt/kvm/arm/hyp/vgic-v2-sr.c
index a3f12b3b277b..3a3a699b7489 100644
--- a/virt/kvm/arm/hyp/vgic-v2-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v2-sr.c
@@ -100,12 +100,11 @@ static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
100 if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i))) 100 if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
101 continue; 101 continue;
102 102
103 if (cpu_if->vgic_elrsr & (1UL << i)) { 103 if (cpu_if->vgic_elrsr & (1UL << i))
104 cpu_if->vgic_lr[i] &= ~GICH_LR_STATE; 104 cpu_if->vgic_lr[i] &= ~GICH_LR_STATE;
105 continue; 105 else
106 } 106 cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
107 107
108 cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
109 writel_relaxed(0, base + GICH_LR0 + (i * 4)); 108 writel_relaxed(0, base + GICH_LR0 + (i * 4));
110 } 109 }
111} 110}
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index 059595ec3da0..9f6fab74dce7 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -191,10 +191,8 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
191 * other thread sync back the IRQ. 191 * other thread sync back the IRQ.
192 */ 192 */
193 while (irq->vcpu && /* IRQ may have state in an LR somewhere */ 193 while (irq->vcpu && /* IRQ may have state in an LR somewhere */
194 irq->vcpu->cpu != -1) { /* VCPU thread is running */ 194 irq->vcpu->cpu != -1) /* VCPU thread is running */
195 BUG_ON(irq->intid < VGIC_NR_PRIVATE_IRQS);
196 cond_resched_lock(&irq->irq_lock); 195 cond_resched_lock(&irq->irq_lock);
197 }
198 196
199 irq->active = new_active_state; 197 irq->active = new_active_state;
200 if (new_active_state) 198 if (new_active_state)
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 8ad42c217770..e31405ee5515 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -112,11 +112,15 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
112 } 112 }
113 } 113 }
114 114
115 /* Clear soft pending state when level IRQs have been acked */ 115 /*
116 if (irq->config == VGIC_CONFIG_LEVEL && 116 * Clear soft pending state when level irqs have been acked.
117 !(val & GICH_LR_PENDING_BIT)) { 117 * Always regenerate the pending state.
118 irq->soft_pending = false; 118 */
119 irq->pending = irq->line_level; 119 if (irq->config == VGIC_CONFIG_LEVEL) {
120 if (!(val & GICH_LR_PENDING_BIT))
121 irq->soft_pending = false;
122
123 irq->pending = irq->line_level || irq->soft_pending;
120 } 124 }
121 125
122 spin_unlock(&irq->irq_lock); 126 spin_unlock(&irq->irq_lock);
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 336a46115937..346b4ad12b49 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -101,11 +101,15 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
101 } 101 }
102 } 102 }
103 103
104 /* Clear soft pending state when level irqs have been acked */ 104 /*
105 if (irq->config == VGIC_CONFIG_LEVEL && 105 * Clear soft pending state when level irqs have been acked.
106 !(val & ICH_LR_PENDING_BIT)) { 106 * Always regenerate the pending state.
107 irq->soft_pending = false; 107 */
108 irq->pending = irq->line_level; 108 if (irq->config == VGIC_CONFIG_LEVEL) {
109 if (!(val & ICH_LR_PENDING_BIT))
110 irq->soft_pending = false;
111
112 irq->pending = irq->line_level || irq->soft_pending;
109 } 113 }
110 114
111 spin_unlock(&irq->irq_lock); 115 spin_unlock(&irq->irq_lock);
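Both vGIC hunks (v2 and v3) encode the same model for level-triggered interrupts: the pending bit is a pure function of the sampled line level OR'd with the software-pending latch, and must be regenerated on every fold, not only when the guest has just acked; otherwise an interrupt that is soft-pending while the line is low can be lost. As a small struct-and-helper sketch:

    #include <stdbool.h>

    /* pending must always be recomputed as
     * line_level || soft_pending. */
    struct level_irq {
        bool line_level;    /* hardware line as last sampled */
        bool soft_pending;  /* latched by a software write   */
        bool pending;
    };

    static void fold(struct level_irq *irq, bool still_pending_in_lr)
    {
        if (!still_pending_in_lr)   /* guest acked: drop the latch */
            irq->soft_pending = false;

        irq->pending = irq->line_level || irq->soft_pending;
    }

    int main(void)
    {
        struct level_irq irq = { .line_level = false,
                                 .soft_pending = true,
                                 .pending = false };

        fold(&irq, true);           /* LR still pending: keep latch */
        return irq.pending ? 0 : 1; /* regenerated to true */
    }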
diff --git a/virt/kvm/irqchip.c b/virt/kvm/irqchip.c
index fe84e1a95dd5..8db197bb6c7a 100644
--- a/virt/kvm/irqchip.c
+++ b/virt/kvm/irqchip.c
@@ -40,7 +40,7 @@ int kvm_irq_map_gsi(struct kvm *kvm,
40 40
41 irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu, 41 irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
42 lockdep_is_held(&kvm->irq_lock)); 42 lockdep_is_held(&kvm->irq_lock));
43 if (gsi < irq_rt->nr_rt_entries) { 43 if (irq_rt && gsi < irq_rt->nr_rt_entries) {
44 hlist_for_each_entry(e, &irq_rt->map[gsi], link) { 44 hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
45 entries[n] = *e; 45 entries[n] = *e;
46 ++n; 46 ++n;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 37af23052470..48bd520fc702 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2935,25 +2935,27 @@ static long kvm_vm_ioctl(struct file *filp,
2935 case KVM_SET_GSI_ROUTING: { 2935 case KVM_SET_GSI_ROUTING: {
2936 struct kvm_irq_routing routing; 2936 struct kvm_irq_routing routing;
2937 struct kvm_irq_routing __user *urouting; 2937 struct kvm_irq_routing __user *urouting;
2938 struct kvm_irq_routing_entry *entries; 2938 struct kvm_irq_routing_entry *entries = NULL;
2939 2939
2940 r = -EFAULT; 2940 r = -EFAULT;
2941 if (copy_from_user(&routing, argp, sizeof(routing))) 2941 if (copy_from_user(&routing, argp, sizeof(routing)))
2942 goto out; 2942 goto out;
2943 r = -EINVAL; 2943 r = -EINVAL;
2944 if (routing.nr >= KVM_MAX_IRQ_ROUTES) 2944 if (routing.nr > KVM_MAX_IRQ_ROUTES)
2945 goto out; 2945 goto out;
2946 if (routing.flags) 2946 if (routing.flags)
2947 goto out; 2947 goto out;
2948 r = -ENOMEM; 2948 if (routing.nr) {
2949 entries = vmalloc(routing.nr * sizeof(*entries)); 2949 r = -ENOMEM;
2950 if (!entries) 2950 entries = vmalloc(routing.nr * sizeof(*entries));
2951 goto out; 2951 if (!entries)
2952 r = -EFAULT; 2952 goto out;
2953 urouting = argp; 2953 r = -EFAULT;
2954 if (copy_from_user(entries, urouting->entries, 2954 urouting = argp;
2955 routing.nr * sizeof(*entries))) 2955 if (copy_from_user(entries, urouting->entries,
2956 goto out_free_irq_routing; 2956 routing.nr * sizeof(*entries)))
2957 goto out_free_irq_routing;
2958 }
2957 r = kvm_set_irq_routing(kvm, entries, routing.nr, 2959 r = kvm_set_irq_routing(kvm, entries, routing.nr,
2958 routing.flags); 2960 routing.flags);
2959out_free_irq_routing: 2961out_free_irq_routing: